pull/1/head
sixcooler 13 years ago
commit 29c2289b5c

@@ -694,6 +694,7 @@ crawlPause.remotesearch=10
 crawler.clientTimeout=9000

 # http crawler specific settings; size in bytes
+crawler.http.accept=text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
 crawler.http.acceptEncoding=gzip
 crawler.http.acceptLanguage=en-us,en;q=0.5
 crawler.http.acceptCharset=ISO-8859-1,utf-8;q=0.7,*;q=0.7
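The new `crawler.http.accept` key makes the crawler's Accept request header configurable, with the compiled-in default (see `HTTPLoader.DEFAULT_ACCEPT` below) kept as a fallback. A minimal, self-contained sketch of that pattern using only the JDK HTTP client; the class name and the use of `java.util.Properties` in place of YaCy's Switchboard config are illustrative assumptions:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Properties;

public class AcceptHeaderDemo {
    // fallback mirrors HTTPLoader.DEFAULT_ACCEPT in the diff below
    static final String DEFAULT_ACCEPT =
            "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8";

    public static void main(String[] args) throws Exception {
        Properties config = new Properties();
        // in YaCy this value would come from yacy.init / the Switchboard
        config.setProperty("crawler.http.accept", DEFAULT_ACCEPT);

        HttpRequest request = HttpRequest.newBuilder(URI.create("https://example.org/"))
                .header("Accept", config.getProperty("crawler.http.accept", DEFAULT_ACCEPT))
                .header("Accept-Encoding", "gzip")
                .header("Accept-Language", "en-us,en;q=0.5")
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode());
    }
}
```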

@@ -23,4 +23,7 @@ network.unit.protocol.control = uncontrolled
 # white/blacklists
 network.unit.access.whitelist = 10\..*,127\..*,172\.(1[6-9]|2[0-9]|3[0-1])\..*,169\.254\..*,192\.168\..*,localhost
 network.unit.access.blacklist =
+
+# optional user agent to keep activities from inside an intranet environment secret
+#network.unit.tenant.agent=Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)
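`network.unit.access.whitelist` is a comma-separated list of regular expressions over host names and addresses; the backslashes escape literal dots, and the `172\.(1[6-9]|2[0-9]|3[0-1])\..*` entry covers the private 172.16.0.0–172.31.255.255 block. A small sketch of how such a list can be evaluated — illustrative only, not YaCy's actual matching code:

```java
import java.util.Arrays;

public class WhitelistDemo {
    // pattern list copied from network.unit.access.whitelist above
    static final String WHITELIST =
            "10\\..*,127\\..*,172\\.(1[6-9]|2[0-9]|3[0-1])\\..*,169\\.254\\..*,192\\.168\\..*,localhost";

    static boolean isWhitelisted(String host) {
        // a host is accepted if it matches any one of the patterns
        return Arrays.stream(WHITELIST.split(",")).anyMatch(host::matches);
    }

    public static void main(String[] args) {
        System.out.println(isWhitelisted("192.168.1.20")); // true: private range
        System.out.println(isWhitelisted("172.20.0.5"));   // true: inside 172.16-31
        System.out.println(isWhitelisted("8.8.8.8"));      // false: public address
    }
}
```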

@@ -49,6 +49,7 @@ import net.yacy.kelondro.logging.Log;
 import org.apache.log4j.Logger;

+import de.anomic.crawler.retrieval.HTTPLoader;
 import de.anomic.data.WorkTables;

 public class RobotsTxt {

@@ -308,7 +309,7 @@ public class RobotsTxt {
             // adding referer
             reqHeaders.put(RequestHeader.REFERER, (MultiProtocolURI.newURL(robotsURL,"/")).toNormalform(true, true));
+            reqHeaders.put(RequestHeader.ACCEPT, HTTPLoader.DEFAULT_ACCEPT);
             if (entry != null) {
                 oldEtag = entry.getETag();
                 reqHeaders = new RequestHeader();
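With the new import, the robots.txt fetch advertises the same Accept value as the page crawler. A self-contained sketch of an equivalent request with the JDK client; the class name is hypothetical, and `URI.resolve("/")` approximates what `MultiProtocolURI.newURL(robotsURL, "/")` computes for the Referer:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RobotsFetchDemo {
    public static void main(String[] args) throws Exception {
        URI robotsUrl = URI.create("https://example.org/robots.txt");

        HttpRequest request = HttpRequest.newBuilder(robotsUrl)
                // host root as Referer, analogous to newURL(robotsURL, "/") above
                .header("Referer", robotsUrl.resolve("/").toString())
                // same value as HTTPLoader.DEFAULT_ACCEPT in this commit
                .header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
                .GET()
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        if (response.statusCode() == 200) {
            System.out.println(response.body());
        }
    }
}
```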

@@ -1,4 +1,4 @@
 // HTTPLoader.java
 // ---------------
 // (C) by Michael Peter Christen; mc@yacy.net
 // first published on http://yacy.net

@@ -39,7 +39,6 @@ import net.yacy.kelondro.logging.Log;
 import net.yacy.repository.Blacklist;
 import net.yacy.search.Switchboard;
 import net.yacy.search.index.Segments;
 import de.anomic.crawler.CrawlProfile;
 import de.anomic.crawler.Latency;
 import de.anomic.crawler.ZURL.FailCategory;

@@ -51,80 +50,82 @@ public final class HTTPLoader {
     private static final String DEFAULT_ENCODING = "gzip,deflate";
     private static final String DEFAULT_LANGUAGE = "en-us,en;q=0.5";
     private static final String DEFAULT_CHARSET = "ISO-8859-1,utf-8;q=0.7,*;q=0.7";
+    public static final String DEFAULT_ACCEPT = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8";
     public static final int DEFAULT_MAXFILESIZE = 1024 * 1024 * 10;
     public static final int DEFAULT_CRAWLING_RETRY_COUNT = 5;

     /**
      * The socket timeout that should be used
      */
     private final int socketTimeout;
     private final Switchboard sb;
     private final Log log;

     public HTTPLoader(final Switchboard sb, final Log theLog) {
         this.sb = sb;
         this.log = theLog;

         // refreshing timeout value
         this.socketTimeout = (int) sb.getConfigLong("crawler.clientTimeout", 10000);
     }

     public Response load(final Request entry, final int maxFileSize, final boolean checkBlacklist) throws IOException {
-        long start = System.currentTimeMillis();
-        Response doc = load(entry, DEFAULT_CRAWLING_RETRY_COUNT, maxFileSize, checkBlacklist);
+        final long start = System.currentTimeMillis();
+        final Response doc = load(entry, DEFAULT_CRAWLING_RETRY_COUNT, maxFileSize, checkBlacklist);
         Latency.update(entry.url(), System.currentTimeMillis() - start);
         return doc;
     }

     private Response load(final Request request, final int retryCount, final int maxFileSize, final boolean checkBlacklist) throws IOException {
         if (retryCount < 0) {
-            sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection counter exceeded", -1);
+            this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection counter exceeded", -1);
             throw new IOException("Redirection counter exceeded for URL " + request.url().toString() + ". Processing aborted.");
         }

         DigestURI url = request.url();
         final String host = url.getHost();
         if (host == null || host.length() < 2) throw new IOException("host is not well-formed: '" + host + "'");
         final String path = url.getFile();
         int port = url.getPort();
         final boolean ssl = url.getProtocol().equals("https");
         if (port < 0) port = (ssl) ? 443 : 80;

         // check if url is in blacklist
         final String hostlow = host.toLowerCase();
         if (checkBlacklist && Switchboard.urlBlacklist.isListed(Blacklist.BLACKLIST_CRAWLER, hostlow, path)) {
-            sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_LOAD_CONTEXT, "url in blacklist", -1);
+            this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_LOAD_CONTEXT, "url in blacklist", -1);
             throw new IOException("CRAWLER Rejecting URL '" + request.url().toString() + "'. URL is in blacklist.");
         }

         // resolve yacy and yacyh domains
-        AlternativeDomainNames yacyResolver = HTTPDemon.getAlternativeResolver();
+        final AlternativeDomainNames yacyResolver = HTTPDemon.getAlternativeResolver();
         if(yacyResolver != null) {
-            String yAddress = yacyResolver.resolve(host);
+            final String yAddress = yacyResolver.resolve(host);
             if(yAddress != null) {
                 url = new DigestURI(url.getProtocol() + "://" + yAddress + path);
             }
         }

         // take a file from the net
         Response response = null;

         // create a request header
         final RequestHeader requestHeader = new RequestHeader();
         requestHeader.put(HeaderFramework.USER_AGENT, ClientIdentification.getUserAgent());
         DigestURI refererURL = null;
-        if (request.referrerhash() != null) refererURL = sb.getURL(Segments.Process.LOCALCRAWLING, request.referrerhash());
+        if (request.referrerhash() != null) refererURL = this.sb.getURL(Segments.Process.LOCALCRAWLING, request.referrerhash());
         if (refererURL != null) requestHeader.put(RequestHeader.REFERER, refererURL.toNormalform(true, true));
-        requestHeader.put(HeaderFramework.ACCEPT_LANGUAGE, sb.getConfig("crawler.http.acceptLanguage", DEFAULT_LANGUAGE));
-        requestHeader.put(HeaderFramework.ACCEPT_CHARSET, sb.getConfig("crawler.http.acceptCharset", DEFAULT_CHARSET));
-        requestHeader.put(HeaderFramework.ACCEPT_ENCODING, sb.getConfig("crawler.http.acceptEncoding", DEFAULT_ENCODING));
+        requestHeader.put(HeaderFramework.ACCEPT, this.sb.getConfig("crawler.http.accept", DEFAULT_ACCEPT));
+        requestHeader.put(HeaderFramework.ACCEPT_LANGUAGE, this.sb.getConfig("crawler.http.acceptLanguage", DEFAULT_LANGUAGE));
+        requestHeader.put(HeaderFramework.ACCEPT_CHARSET, this.sb.getConfig("crawler.http.acceptCharset", DEFAULT_CHARSET));
+        requestHeader.put(HeaderFramework.ACCEPT_ENCODING, this.sb.getConfig("crawler.http.acceptEncoding", DEFAULT_ENCODING));

         // HTTP-Client
         final HTTPClient client = new HTTPClient();
         client.setRedirecting(false); // we want to handle redirection ourselves, so we don't index pages twice
-        client.setTimout(socketTimeout);
+        client.setTimout(this.socketTimeout);
         client.setHeader(requestHeader.entrySet());

         // send request
         final byte[] responseBody = client.GETbytes(url, maxFileSize);
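The net effect of the header hunk above is that every `Accept*` header is now read from configuration with a compiled-in default, and the previously hard-wired Accept value becomes the `crawler.http.accept` fallback. A minimal sketch of that lookup pattern, with `java.util.Properties` standing in for YaCy's Switchboard config:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

public class CrawlerHeadersDemo {
    // defaults mirror the DEFAULT_* constants declared in the hunk above
    static Map<String, String> buildHeaders(Properties config) {
        Map<String, String> headers = new LinkedHashMap<>();
        headers.put("Accept", config.getProperty("crawler.http.accept",
                "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"));
        headers.put("Accept-Language", config.getProperty("crawler.http.acceptLanguage",
                "en-us,en;q=0.5"));
        headers.put("Accept-Charset", config.getProperty("crawler.http.acceptCharset",
                "ISO-8859-1,utf-8;q=0.7,*;q=0.7"));
        headers.put("Accept-Encoding", config.getProperty("crawler.http.acceptEncoding",
                "gzip,deflate"));
        return headers;
    }

    public static void main(String[] args) {
        // with an empty config, all four headers fall back to the defaults
        buildHeaders(new Properties()).forEach((k, v) -> System.out.println(k + ": " + v));
    }
}
```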
@@ -139,10 +140,10 @@ public final class HTTPLoader {
                 redirectionUrlString = redirectionUrlString.trim();

                 if (redirectionUrlString.length() == 0) {
-                    sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection header empy", code);
+                    this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection header empy", code);
                     throw new IOException("CRAWLER Redirection of URL=" + request.url().toString() + " aborted. Location header is empty.");
                 }

                 // normalizing URL
                 final DigestURI redirectionUrl = new DigestURI(MultiProtocolURI.newURL(request.url(), redirectionUrlString));

@@ -152,48 +153,48 @@ public final class HTTPLoader {
                 // if we are already doing a shutdown we don't need to retry crawling
                 if (Thread.currentThread().isInterrupted()) {
-                    sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_LOAD_CONTEXT, "server shutdown", code);
+                    this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_LOAD_CONTEXT, "server shutdown", code);
                     throw new IOException("CRAWLER Retry of URL=" + request.url().toString() + " aborted because of server shutdown.");
                 }

                 // check if the url was already indexed
-                final String dbname = sb.urlExists(Segments.Process.LOCALCRAWLING, redirectionUrl.hash());
+                final String dbname = this.sb.urlExists(Segments.Process.LOCALCRAWLING, redirectionUrl.hash());
                 if (dbname != null) {
-                    sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection to double content", code);
+                    this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "redirection to double content", code);
                     throw new IOException("CRAWLER Redirection of URL=" + request.url().toString() + " ignored. The url appears already in db " + dbname);
                 }

                 // retry crawling with new url
                 request.redirectURL(redirectionUrl);
                 return load(request, retryCount - 1, maxFileSize, checkBlacklist);
             } else {
                 // no redirection url provided
-                sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "no redirection url provided", code);
+                this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "no redirection url provided", code);
                 throw new IOException("REJECTED EMTPY REDIRECTION '" + client.getHttpResponse().getStatusLine() + "' for URL " + request.url().toString());
             }
         } else if (responseBody == null) {
             // no response, reject file
-            sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "no response body", code);
+            this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "no response body", code);
             throw new IOException("REJECTED EMPTY RESPONSE BODY '" + client.getHttpResponse().getStatusLine() + "' for URL " + request.url().toString());
         } else if (code == 200 || code == 203) {
             // the transfer is ok

             // we write the new cache entry to file system directly
-            long contentLength = responseBody.length;
+            final long contentLength = responseBody.length;
             ByteCount.addAccountCount(ByteCount.CRAWLER, contentLength);

             // check length again in case it was not possible to get the length before loading
             if (maxFileSize > 0 && contentLength > maxFileSize) {
-                sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_PROCESS_CONTEXT, "file size limit exceeded", code);
+                this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.FINAL_PROCESS_CONTEXT, "file size limit exceeded", code);
                 throw new IOException("REJECTED URL " + request.url() + " because file size '" + contentLength + "' exceeds max filesize limit of " + maxFileSize + " bytes. (GET)");
             }

             // create a new cache entry
-            final CrawlProfile profile = sb.crawler.getActive(request.profileHandle().getBytes());
+            final CrawlProfile profile = this.sb.crawler.getActive(request.profileHandle().getBytes());
             response = new Response(
                     request,
                     requestHeader,
                     header,
                     Integer.toString(code),
                     profile,
                     responseBody

@@ -202,37 +203,37 @@ public final class HTTPLoader {

             return response;
         } else {
             // if the response has not the right response type then reject file
-            sb.crawlQueues.errorURL.push(request, sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "wrong http status code", code);
+            this.sb.crawlQueues.errorURL.push(request, this.sb.peers.mySeed().hash.getBytes(), new Date(), 1, FailCategory.TEMPORARY_NETWORK_FAILURE, "wrong http status code", code);
             throw new IOException("REJECTED WRONG STATUS TYPE '" + client.getHttpResponse().getStatusLine() + "' for URL " + request.url().toString());
         }
     }

     public static Response load(final Request request) throws IOException {
         return load(request, 3);
     }

-    private static Response load(final Request request, int retryCount) throws IOException {
+    private static Response load(final Request request, final int retryCount) throws IOException {
         if (retryCount < 0) {
             throw new IOException("Redirection counter exceeded for URL " + request.url().toString() + ". Processing aborted.");
         }

         final String host = request.url().getHost();
         if (host == null || host.length() < 2) throw new IOException("host is not well-formed: '" + host + "'");
         final String path = request.url().getFile();
         int port = request.url().getPort();
         final boolean ssl = request.url().getProtocol().equals("https");
         if (port < 0) port = (ssl) ? 443 : 80;

         // check if url is in blacklist
         final String hostlow = host.toLowerCase();
         if (Switchboard.urlBlacklist != null && Switchboard.urlBlacklist.isListed(Blacklist.BLACKLIST_CRAWLER, hostlow, path)) {
             throw new IOException("CRAWLER Rejecting URL '" + request.url().toString() + "'. URL is in blacklist.");
         }

         // take a file from the net
         Response response = null;

         // create a request header
         final RequestHeader requestHeader = new RequestHeader();
         requestHeader.put(HeaderFramework.USER_AGENT, ClientIdentification.getUserAgent());

@@ -251,17 +252,17 @@ public final class HTTPLoader {
         if (responseBody != null && (code == 200 || code == 203)) {
             // the transfer is ok

             //statistics:
             ByteCount.addAccountCount(ByteCount.CRAWLER, responseBody.length);

             // we write the new cache entry to file system directly

             // create a new cache entry
             response = new Response(
                     request,
                     requestHeader,
                     header,
                     Integer.toString(code),
                     null,
                     responseBody

@@ -277,16 +278,16 @@ public final class HTTPLoader {
             if (redirectionUrlString.length() == 0) {
                 throw new IOException("CRAWLER Redirection of URL=" + request.url().toString() + " aborted. Location header is empty.");
             }

             // normalizing URL
             final DigestURI redirectionUrl = new DigestURI(MultiProtocolURI.newURL(request.url(), redirectionUrlString));

             // if we are already doing a shutdown we don't need to retry crawling
             if (Thread.currentThread().isInterrupted()) {
                 throw new IOException("CRAWLER Retry of URL=" + request.url().toString() + " aborted because of server shutdown.");
             }

             // retry crawling with new url
             request.redirectURL(redirectionUrl);
             return load(request, retryCount - 1);
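Both load paths disable the client's own redirect handling (`client.setRedirecting(false)`) and instead recurse with `retryCount - 1` until the counter is exhausted, so redirect chains and loops terminate deterministically. A self-contained sketch of that bounded-redirect pattern with the JDK client; the names are illustrative, not YaCy API:

```java
import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class RedirectDemo {
    // each redirect recurses with retryCount - 1, as in HTTPLoader.load
    static HttpResponse<byte[]> load(HttpClient client, URI url, int retryCount)
            throws IOException, InterruptedException {
        if (retryCount < 0) {
            throw new IOException("Redirection counter exceeded for URL " + url);
        }
        HttpRequest request = HttpRequest.newBuilder(url).GET().build();
        HttpResponse<byte[]> response =
                client.send(request, HttpResponse.BodyHandlers.ofByteArray());
        int code = response.statusCode();
        if (code >= 300 && code < 400) {
            String location = response.headers().firstValue("Location")
                    .orElseThrow(() -> new IOException("Redirection aborted. Location header is empty."))
                    .trim();
            // normalize a possibly relative Location against the requested URL
            return load(client, url.resolve(location), retryCount - 1);
        }
        return response;
    }

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newBuilder()
                .followRedirects(HttpClient.Redirect.NEVER) // handle redirects ourselves
                .build();
        System.out.println(load(client, URI.create("http://example.org/"), 3).statusCode());
    }
}
```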
@@ -297,5 +298,5 @@ public final class HTTPLoader {
         }

         return response;
     }
 }
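Note that the 200/203 branch re-checks the body length against `maxFileSize` even after a successful download, because the Content-Length header is not always available before loading (`DEFAULT_MAXFILESIZE` caps this at 10 MiB). A minimal sketch of that guard; the helper name is hypothetical:

```java
import java.io.IOException;

public class SizeLimitDemo {
    static byte[] checkSize(byte[] responseBody, long maxFileSize, String url) throws IOException {
        // re-validate after download, mirroring the contentLength check in HTTPLoader
        if (maxFileSize > 0 && responseBody.length > maxFileSize) {
            throw new IOException("REJECTED URL " + url + " because file size '"
                    + responseBody.length + "' exceeds max filesize limit of "
                    + maxFileSize + " bytes. (GET)");
        }
        return responseBody;
    }

    public static void main(String[] args) throws IOException {
        byte[] body = new byte[1024];
        System.out.println(checkSize(body, 1024L * 1024 * 10, "http://example.org/").length);
    }
}
```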
