enabled crawl starts with very large sets of start URLs

i.e. a 10 MB URL list with approximately 0.5 million start points
pull/419/head
Michael Peter Christen 4 years ago
parent c623a3252e
commit e81b770f79

@@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Writer;
import java.net.MalformedURLException;
+import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
@@ -207,7 +208,7 @@ public class Crawler_p {
prop.putHTML("info-queue_message", "pause reason: " + queuemessage);
}
if (post != null && post.containsKey("terminate")) try {
final String handle = post.get("handle", "");
// termination of a crawl: shift the crawl from active to passive
final CrawlProfile p = sb.crawler.getActive(handle.getBytes());
@@ -226,11 +227,11 @@ public class Crawler_p {
prop.put("info", "3");
} else {
if(post.getBoolean("cleanSearchCache")) {
// clean up all search events
SearchEventCache.cleanupEvents(true);
sb.index.clearCaches(); // every time the ranking is changed we need to remove old orderings
}
// remove crawlingFileContent before we record the call
String crawlingFileName = post.get("crawlingFile");
@@ -267,6 +268,7 @@ public class Crawler_p {
Set<DigestURL> rootURLs = new HashSet<DigestURL>();
String crawlName = "";
if (crawlingFile == null) for (String crawlingStart: rootURLs0) {
+StringBuilder crawlNameBuilder = new StringBuilder(); // for large crawl queues this can be pretty large
if (crawlingStart == null || crawlingStart.length() == 0) continue;
// add the prefix http:// if necessary
int pos = crawlingStart.indexOf("://",0);
@@ -276,14 +278,14 @@ public class Crawler_p {
try {
DigestURL crawlingStartURL = new DigestURL(crawlingStart);
rootURLs.add(crawlingStartURL);
-crawlName += ((crawlingStartURL.getHost() == null) ? crawlingStartURL.toNormalform(true) : crawlingStartURL.getHost()) + ',';
+crawlNameBuilder.append((crawlingStartURL.getHost() == null) ? crawlingStartURL.toNormalform(true) : crawlingStartURL.getHost()).append(',');
if (crawlingStartURL != null && (crawlingStartURL.isFile() || crawlingStartURL.isSMB())) storeHTCache = false;
} catch (final MalformedURLException e) {
ConcurrentLog.warn("Crawler_p", "crawl start url invalid: " + e.getMessage());
}
+crawlName = crawlNameBuilder.toString();
} else {
crawlName = crawlingFile.getName();
}
if (crawlName.endsWith(",")) crawlName = crawlName.substring(0, crawlName.length() - 1);
if (crawlName.length() > 64) {
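
The hunk above replaces repeated String concatenation of the crawl name with a StringBuilder. With roughly half a million start URLs, crawlName += host + ',' copies the whole accumulated string on every iteration, which is quadratic; the builder appends in amortized constant time. A minimal sketch of that pattern, independent of YaCy's classes (the host list here is purely illustrative):

import java.util.List;

public class CrawlNameSketch {
    // Builds a comma separated name from many hosts without quadratic string copying.
    static String buildName(List<String> hosts) {
        StringBuilder name = new StringBuilder(hosts.size() * 16); // rough pre-sizing
        for (String host : hosts) {
            name.append(host).append(',');
        }
        if (name.length() > 0) name.setLength(name.length() - 1);  // drop the trailing comma
        return name.toString();
    }

    public static void main(String[] args) {
        System.out.println(buildName(List.of("example.org", "example.net", "example.com")));
    }
}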
@@ -415,13 +417,15 @@ public class Crawler_p {
// delete all error urls for that domain
// and all urls for that host from the crawl queue
+List<String> deleteIDs = new ArrayList<>();
Set<String> hosthashes = new HashSet<String>();
boolean anysmbftporpdf = false;
for (DigestURL u : rootURLs) {
-sb.index.fulltext().remove(u.hash());
+deleteIDs.add(new String(u.hash()));
hosthashes.add(u.hosthash());
if ("smb.ftp".indexOf(u.getProtocol()) >= 0 || "pdf".equals(MultiProtocolURL.getFileExtension(u.getFileName()))) anysmbftporpdf = true;
}
+sb.index.fulltext().remove(deleteIDs);
sb.crawlQueues.removeHosts(hosthashes);
sb.index.fulltext().commit(true);
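
Root URLs are no longer removed from the fulltext index one call at a time; their hashes are collected into deleteIDs and handed to a single remove(...) call, so a start list with 0.5 million entries does not translate into 0.5 million individual delete requests. A hedged sketch of the same collect-then-flush idea against a generic store (the FulltextStore interface below is invented for illustration, it is not YaCy's API):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

interface FulltextStore {
    void remove(Collection<String> ids); // one bulk delete instead of n single deletes
}

class BulkDeleteSketch {
    static void removeAll(FulltextStore store, List<byte[]> urlHashes) {
        List<String> ids = new ArrayList<>(urlHashes.size());
        for (byte[] hash : urlHashes) {
            ids.add(new String(hash)); // same byte[]-to-String conversion as in the patch
        }
        store.remove(ids);             // single batched request
    }
}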
@@ -458,8 +462,10 @@ public class Crawler_p {
// check if the crawl filter works correctly
try {
Pattern mmp = Pattern.compile(newcrawlingMustMatch);
+int maxcheck = 100;
for (DigestURL u: rootURLs) {
assert mmp.matcher(u.toNormalform(true)).matches() : "pattern " + mmp.toString() + " does not match url " + u.toNormalform(true);
+if (maxcheck-- <= 0) break;
}
} catch (final PatternSyntaxException e) {
prop.put("info", "4"); // crawlfilter does not match url
@@ -474,7 +480,7 @@ public class Crawler_p {
prop.put("info", "5"); //Crawling failed
prop.putHTML("info_crawlingURL", "(no url given)");
prop.putHTML("info_reasonString", "you must submit at least one crawl url");
hasCrawlstartDataOK = false;
}
}
@@ -487,10 +493,10 @@ public class Crawler_p {
String ignoreclassname_s = post.get("ignoreclassname");
Set<String> ignoreclassname = new HashSet<>();
if (ignoreclassname_s != null) {
String[] ignoreclassname_a = ignoreclassname_s.trim().split(",");
for (int i = 0; i < ignoreclassname_a.length; i++) {
ignoreclassname.add(ignoreclassname_a[i].trim());
}
}
// get vocabulary scraper info
@@ -528,13 +534,13 @@ public class Crawler_p {
try {
if (newcrawlingdepth > 0) {
if (fullDomain) {
/* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
* Otherwise we can get them asynchronously later, thus allowing to handle more efficiently large start crawlingFiles */
hyperlinks_from_file = crawlingFileStart(crawlingFile, timezoneOffset, crawlingFileContent);
newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
} else if (subPath) {
/* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
* Otherwise we can get them asynchronously later, thus allowing to handle more efficiently large start crawlingFiles */
hyperlinks_from_file = crawlingFileStart(crawlingFile, timezoneOffset, crawlingFileContent);
newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
}
@@ -550,50 +556,47 @@ public class Crawler_p {
}
/* If a solr query filter is defined, verify now its syntax and that the embedded Solr schema is available */
final String solrQueryMustMatch = post.get(CrawlAttribute.INDEXING_SOLR_QUERY_MUSTMATCH.key, CrawlProfile.SOLR_MATCH_ALL_QUERY).trim();
final String solrQueryMustNotMatch = post.get(CrawlAttribute.INDEXING_SOLR_QUERY_MUSTNOTMATCH.key, CrawlProfile.SOLR_EMPTY_QUERY).trim();
if(!(solrQueryMustMatch.isEmpty() || CrawlProfile.SOLR_MATCH_ALL_QUERY.equals(solrQueryMustMatch)) || !CrawlProfile.SOLR_EMPTY_QUERY.equals(solrQueryMustNotMatch)) {
final EmbeddedInstance embeddedSolr = sb.index.fulltext().getEmbeddedInstance();
final SolrCore embeddedCore = embeddedSolr != null ? embeddedSolr.getDefaultCore() : null;
final boolean embeddedSolrConnected = embeddedSolr != null && embeddedCore != null;
prop.put("noEmbeddedSolr", !embeddedSolrConnected);
if (embeddedSolrConnected) {
if(!(solrQueryMustMatch.isEmpty() || CrawlProfile.SOLR_MATCH_ALL_QUERY.equals(solrQueryMustMatch))) {
try {
SingleDocumentMatcher.toLuceneQuery(solrQueryMustMatch, embeddedCore);
} catch(final SyntaxError | SolrException e) {
hasCrawlstartDataOK = false;
prop.put("info", "10");
prop.put("info_solrQuery", solrQueryMustMatch);
} catch(final RuntimeException e) {
hasCrawlstartDataOK = false;
prop.put("info", "11");
prop.put("info_solrQuery", solrQueryMustMatch);
}
}
if(!CrawlProfile.SOLR_EMPTY_QUERY.equals(solrQueryMustNotMatch)) {
try {
SingleDocumentMatcher.toLuceneQuery(solrQueryMustNotMatch, embeddedCore);
} catch(final SyntaxError | SolrException e) {
hasCrawlstartDataOK = false;
prop.put("info", "10");
prop.put("info_solrQuery", solrQueryMustNotMatch);
} catch(final RuntimeException e) {
hasCrawlstartDataOK = false;
prop.put("info", "11");
prop.put("info_solrQuery", solrQueryMustNotMatch);
}
}
} else {
hasCrawlstartDataOK = false;
prop.put("info", "9");
}
-}
-}
// prepare a new crawling profile
final CrawlProfile profile;
@@ -632,19 +635,18 @@ public class Crawler_p {
new VocabularyScraper(vocabulary_scraper),
timezoneOffset);
profile.put(CrawlAttribute.CRAWLER_ORIGIN_URL_MUSTMATCH.key,
post.get(CrawlAttribute.CRAWLER_ORIGIN_URL_MUSTMATCH.key, CrawlProfile.MATCH_ALL_STRING));
profile.put(CrawlAttribute.CRAWLER_ORIGIN_URL_MUSTNOTMATCH.key, post
.get(CrawlAttribute.CRAWLER_ORIGIN_URL_MUSTNOTMATCH.key, CrawlProfile.MATCH_NEVER_STRING));
profile.put(CrawlAttribute.INDEXING_MEDIA_TYPE_MUSTMATCH.key,
post.get(CrawlAttribute.INDEXING_MEDIA_TYPE_MUSTMATCH.key, CrawlProfile.MATCH_ALL_STRING));
profile.put(CrawlAttribute.INDEXING_MEDIA_TYPE_MUSTNOTMATCH.key, post
.get(CrawlAttribute.INDEXING_MEDIA_TYPE_MUSTNOTMATCH.key, CrawlProfile.MATCH_NEVER_STRING));
profile.put(CrawlAttribute.INDEXING_SOLR_QUERY_MUSTMATCH.key, solrQueryMustMatch);
profile.put(CrawlAttribute.INDEXING_SOLR_QUERY_MUSTNOTMATCH.key, solrQueryMustNotMatch);
profile.put(CrawlAttribute.CRAWLER_ALWAYS_CHECK_MEDIA_TYPE.key,
post.getBoolean("crawlerAlwaysCheckMediaType"));
handle = ASCII.getBytes(profile.handle());
@@ -659,13 +661,11 @@ public class Crawler_p {
handle = null;
}
// start the crawl
-if(hasCrawlstartDataOK) {
+if (hasCrawlstartDataOK) {
final boolean wontReceiptRemoteRsults = crawlOrder && !sb.getConfigBool(SwitchboardConstants.CRAWLJOB_REMOTE, false);
if ("url".equals(crawlingMode)) {
// stack requests
sb.crawler.putActive(handle, profile);
final Set<DigestURL> successurls = new HashSet<DigestURL>();
@@ -709,59 +709,58 @@ public class Crawler_p {
prop.putHTML("info_reasonString", fr.toString());
}
if (successurls.size() > 0) {
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
prop.put("wontReceiptRemoteResults", wontReceiptRemoteRsults);
}
} else if ("sitemap".equals(crawlingMode)) {
try {
final DigestURL sitemapURL = sitemapURLStr.indexOf("//") > 0 ? new DigestURL(sitemapURLStr) : new DigestURL(rootURLs.iterator().next(), sitemapURLStr); // fix for relative paths which should not exist but are used anyway
sb.crawler.putActive(handle, profile);
final SitemapImporter importer = new SitemapImporter(sb, sitemapURL, profile);
importer.start();
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
prop.put("wontReceiptRemoteResults", wontReceiptRemoteRsults);
} catch (final Exception e) {
// mist
prop.put("info", "6");//Error with url
prop.putHTML("info_crawlingStart", sitemapURLStr);
prop.putHTML("info_error", e.getMessage());
ConcurrentLog.logException(e);
}
} else if ("file".equals(crawlingMode)) {
if (post.containsKey("crawlingFile") && crawlingFile != null) {
try {
if(newcrawlingdepth > 0 && (fullDomain || subPath)) {
/* All links must have already been loaded because they are the part of the newcrawlingMustMatch filter */
if(hyperlinks_from_file != null) {
sb.crawler.putActive(handle, profile);
sb.crawlStacker.enqueueEntriesAsynchronous(sb.peers.mySeed().hash.getBytes(), profile.handle(), hyperlinks_from_file, profile.timezoneOffset());
}
} else {
/* No restriction on domains or subpath : we scrape now links and asynchronously push them to the crawlStacker */
final String crawlingFileContent = post.get("crawlingFile$file", "");
final ContentScraper scraper = new ContentScraper(new DigestURL(crawlingFile), 10000000,
new HashSet<String>(), new VocabularyScraper(), profile.timezoneOffset());
FileCrawlStarterTask crawlStarterTask = new FileCrawlStarterTask(crawlingFile, crawlingFileContent, scraper, profile,
sb.crawlStacker, sb.peers.mySeed().hash.getBytes());
sb.crawler.putActive(handle, profile);
crawlStarterTask.start();
}
} catch (final PatternSyntaxException e) {
prop.put("info", "4"); // crawlfilter does not match url
prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
prop.putHTML("info_error", e.getMessage());
} catch (final Exception e) {
// mist
prop.put("info", "7"); // Error with file
prop.putHTML("info_crawlingStart", crawlingFileName);
prop.putHTML("info_error", e.getMessage());
ConcurrentLog.logException(e);
}
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
prop.put("wontReceiptRemoteResults", wontReceiptRemoteRsults);
}
}
}
}
}
@@ -826,7 +825,7 @@ public class Crawler_p {
String hosts = "";
for (final byte[] h: sb.crawler.getActive()) {
profile = sb.crawler.getActive(h);
if (CrawlSwitchboard.DEFAULT_PROFILES.contains(profile.name())) continue;
profile.putProfileEntry("crawlProfilesShow_list_", prop, true, dark, count, domlistlength);
prop.put("crawlProfilesShow_list_" + count + "_debug", debug ? 1 : 0);
if (debug) {
@@ -877,47 +876,47 @@ public class Crawler_p {
* @throws IOException
* @throws FileNotFoundException
*/
private static List<AnchorURL> crawlingFileStart(final File crawlingFile, int timezoneOffset,
final String crawlingFileContent) throws MalformedURLException, IOException, FileNotFoundException {
List<AnchorURL> hyperlinks_from_file;
// check if the crawl filter works correctly
final ContentScraper scraper = new ContentScraper(new DigestURL(crawlingFile), 10000000, new HashSet<String>(), new VocabularyScraper(), timezoneOffset);
final Writer writer = new TransformerWriter(null, null, scraper, false);
if((crawlingFileContent == null || crawlingFileContent.isEmpty()) && crawlingFile != null) {
/* Let's report here detailed error to help user when he selected a wrong file */
if(!crawlingFile.exists()) {
throw new FileNotFoundException(crawlingFile.getAbsolutePath() + " does not exists");
}
if(!crawlingFile.isFile()) {
throw new FileNotFoundException(crawlingFile.getAbsolutePath() + " exists but is not a regular file");
}
if(!crawlingFile.canRead()) {
throw new IOException("Can not read : " + crawlingFile.getAbsolutePath());
}
}
if (crawlingFile != null) {
FileInputStream inStream = null;
try {
inStream = new FileInputStream(crawlingFile);
FileUtils.copy(inStream, writer);
} finally {
if(inStream != null) {
try {
inStream.close();
} catch(IOException ignoredException) {
ConcurrentLog.info("Crawler_p", "Could not close crawlingFile : " + crawlingFile.getAbsolutePath());
}
}
}
} else {
FileUtils.copy(crawlingFileContent, writer);
}
writer.close();
// get links and generate filter
hyperlinks_from_file = scraper.getAnchors();
return hyperlinks_from_file;
}

private static Date timeParser(final boolean recrawlIfOlderCheck, final int number, final String unit) {
if (!recrawlIfOlderCheck) return null;

@@ -85,8 +85,10 @@ public class Jetty9HttpServerImpl implements YaCyHttpServer {
connector.setName("httpd:"+Integer.toString(port));
connector.setIdleTimeout(9000); // timout in ms when no bytes send / received
connector.setAcceptQueueSize(128);
server.addConnector(connector);
// add ssl/https connector
boolean useSSL = sb.getConfigBool("server.https", false);
@@ -202,6 +204,7 @@ public class Jetty9HttpServerImpl implements YaCyHttpServer {
context.setServer(server);
context.setContextPath("/");
context.setHandler(handlers);
+context.setMaxFormContentSize(1024 * 1024 * 10); // allow 10MB, large forms may be required during crawl starts with long lists
// make YaCy handlers (in context) and servlet context handlers available (both contain root context "/")
// logic: 1. YaCy handlers are called if request not handled (e.g. proxy) then servlets handle it
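
Submitting a 10 MB start-URL list through the crawl start form exceeds Jetty's default form content limit (around 200 KB), so the context handler above is raised to 10 MB. A standalone sketch of the same setting on a bare Jetty 9 server (assumes jetty-server and jetty-servlet on the classpath; the port is illustrative):

import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class LargeFormJettySketch {
    public static void main(String[] args) throws Exception {
        Server server = new Server(8090);                    // illustrative port
        ServletContextHandler context = new ServletContextHandler();
        context.setContextPath("/");
        context.setMaxFormContentSize(1024 * 1024 * 10);     // accept POSTed forms up to 10 MB
        server.setHandler(context);
        server.start();
        server.join();
    }
}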

@@ -3757,7 +3757,8 @@ public final class Switchboard extends serverSwitch {
if ((failreason = Switchboard.this.stackUrl(profile, turl)) == null) successurls.add(turl); else failurls.put(turl, failreason);
return;
}
-final List<Thread> stackthreads = new ArrayList<Thread>(); // do this concurrently
+final ArrayList<Thread> stackthreads = new ArrayList<Thread>(); // do this concurrently
+int maxthreads = 5 * Runtime.getRuntime().availableProcessors();
for (DigestURL url: rootURLs) {
final DigestURL turl = url;
Thread t = new Thread("Switchboard.stackURLs") {
@@ -3769,7 +3770,13 @@ public final class Switchboard extends serverSwitch {
};
t.start();
stackthreads.add(t);
-try {Thread.sleep(100);} catch (final InterruptedException e) {} // to prevent that this fires more than 10 connections pre second!
+if (stackthreads.size() > maxthreads) {
+Thread w = stackthreads.get(0);
+while (w.isAlive()) {
+try {Thread.sleep(100);} catch (final InterruptedException e) {}
+}
+stackthreads.remove(0);
+}
}
final long waitingtime = 10 + (30000 / rootURLs.size()); // at most wait only halve an minute to prevent that the crawl start runs into a time-out
for (Thread t: stackthreads) try {t.join(waitingtime);} catch (final InterruptedException e) {}
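
The old loop slept 100 ms after starting each stacker thread, which throttled the rate to about 10 threads per second and, for 0.5 million start URLs, would stretch the crawl start to roughly 14 hours. The new code keeps a sliding window of at most 5 * availableProcessors() live threads and waits for the oldest one before starting another. The same window pattern, sketched with a placeholder work unit and join() instead of the isAlive/sleep poll used in the patch:

import java.util.ArrayList;
import java.util.List;

public class BoundedThreadWindowSketch {
    public static void main(String[] args) throws InterruptedException {
        final int maxThreads = 5 * Runtime.getRuntime().availableProcessors();
        final List<Thread> window = new ArrayList<>();
        for (int i = 0; i < 1000; i++) {                  // stands in for the root URL loop
            final int task = i;
            Thread t = new Thread(() -> work(task));      // placeholder work unit
            t.start();
            window.add(t);
            if (window.size() > maxThreads) {
                window.remove(0).join();                  // block on the oldest thread to bound concurrency
            }
        }
        for (Thread t : window) t.join();                 // drain whatever is still running
    }

    static void work(int i) {
        try { Thread.sleep(10); } catch (InterruptedException ignored) {}
    }
}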
