This organizes all urls to be loaded in separate queues for each host. Each host separates the crawl depth into its own queue. The primary rule for urls taken from any queue is that the crawl depth is minimal. This produces a crawl depth which is identical to the clickdepth. Furthermore, the crawl is able to create a much better balancing over all hosts which is fair to all hosts that are in the queue. This process will create a very large number of files for wide crawls in the QUEUES folder: for each host a directory, for each crawl depth a file inside the directory. A crawl with maxdepth = 4 will be able to create 10.000s of files. To be able to use that many file readers, it was necessary to implement a new index data structure which opens the file only if an access is wanted (OnDemandOpenFileIndex). The usage of such an on-demand file reader shall prevent that the number of file pointers exceeds the system limit, which is usually about 10.000 open files. Some parts of YaCy had to be adapted to handle the crawl depth number correctly. The logging and the IndexCreateQueues servlet had to be adapted to show the crawl queues differently, because the host name is attached to the port on the host to differentiate between http, https, and ftp services.pull/1/head
parent
075b6f9278
commit
da86f150ab
@ -0,0 +1,372 @@
|
||||
/**
|
||||
* HostQueues
|
||||
* Copyright 2013 by Michael Christen
|
||||
* First released 24.09.2013 at http://yacy.net
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with this program in the file lgpl21.txt
|
||||
* If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
package net.yacy.crawler;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.ConcurrentModificationException;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import net.yacy.cora.document.encoding.ASCII;
|
||||
import net.yacy.cora.document.id.DigestURL;
|
||||
import net.yacy.cora.order.Base64Order;
|
||||
import net.yacy.cora.protocol.ClientIdentification;
|
||||
import net.yacy.cora.storage.HandleSet;
|
||||
import net.yacy.cora.util.ConcurrentLog;
|
||||
import net.yacy.cora.util.SpaceExceededException;
|
||||
import net.yacy.crawler.data.CrawlProfile;
|
||||
import net.yacy.crawler.data.Latency;
|
||||
import net.yacy.crawler.retrieval.Request;
|
||||
import net.yacy.crawler.robots.RobotsTxt;
|
||||
import net.yacy.kelondro.data.word.Word;
|
||||
import net.yacy.kelondro.index.RowHandleSet;
|
||||
|
||||
/**
|
||||
* wrapper for single HostQueue queues; this is a collection of such queues.
|
||||
* All these queues are stored in a common directory for the queue stacks.
|
||||
*
|
||||
* ATTENTION: the order of urls returned by this balancer must strictly follow the clickdepth order.
|
||||
* That means that all links from a given host must be returned from the lowest crawldepth only.
|
||||
* The crawldepth is interpreted as clickdepth and the crawler is producing that semantic using a
|
||||
* correct crawl ordering.
|
||||
*/
|
||||
public class HostBalancer implements Balancer {
|
||||
|
||||
|
||||
private final File hostsPath;
|
||||
private final boolean exceed134217727;
|
||||
private final Map<String, HostQueue> queues;
|
||||
private final Set<String> roundRobinHostHashes;
|
||||
private HandleSet urlHashDoubleCheck;
|
||||
|
||||
public HostBalancer(
|
||||
final File hostsPath,
|
||||
final boolean exceed134217727) {
|
||||
this.hostsPath = hostsPath;
|
||||
this.exceed134217727 = exceed134217727;
|
||||
this.urlHashDoubleCheck = new RowHandleSet(Word.commonHashLength, Word.commonHashOrder, 0);
|
||||
|
||||
// create a stack for newly entered entries
|
||||
if (!(hostsPath.exists())) hostsPath.mkdirs(); // make the path
|
||||
this.queues = new ConcurrentHashMap<String, HostQueue>();
|
||||
String[] list = this.hostsPath.list();
|
||||
for (String address: list) try {
|
||||
File queuePath = new File(this.hostsPath, address);
|
||||
HostQueue queue = new HostQueue(queuePath, this.queues.size() > 100, this.exceed134217727);
|
||||
if (queue.size() == 0) {
|
||||
queue.close();
|
||||
queuePath.delete();
|
||||
} else {
|
||||
this.queues.put(DigestURL.hosthash(queue.getHost(), queue.getPort()), queue);
|
||||
}
|
||||
} catch (MalformedURLException e) {
|
||||
ConcurrentLog.logException(e);
|
||||
}
|
||||
this.roundRobinHostHashes = new HashSet<String>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() {
|
||||
if (this.urlHashDoubleCheck != null) {
|
||||
this.urlHashDoubleCheck.clear();
|
||||
}
|
||||
for (HostQueue queue: this.queues.values()) queue.close();
|
||||
this.queues.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
if (this.urlHashDoubleCheck != null) {
|
||||
this.urlHashDoubleCheck.clear();
|
||||
}
|
||||
for (HostQueue queue: this.queues.values()) queue.clear();
|
||||
this.queues.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Request get(final byte[] urlhash) throws IOException {
|
||||
String hosthash = ASCII.String(urlhash, 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) return null;
|
||||
return queue.get(urlhash);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int removeAllByProfileHandle(final String profileHandle, final long timeout) throws IOException, SpaceExceededException {
|
||||
int c = 0;
|
||||
for (HostQueue queue: this.queues.values()) c += queue.removeAllByProfileHandle(profileHandle, timeout);
|
||||
return c;
|
||||
}
|
||||
|
||||
/**
|
||||
* delete all urls which are stored for given host hashes
|
||||
* @param hosthashes
|
||||
* @return number of deleted urls
|
||||
*/
|
||||
@Override
|
||||
public int removeAllByHostHashes(final Set<String> hosthashes) {
|
||||
int c = 0;
|
||||
for (String h: hosthashes) {
|
||||
HostQueue hq = this.queues.get(h);
|
||||
if (hq != null) c += hq.removeAllByHostHashes(hosthashes);
|
||||
}
|
||||
// remove from cache
|
||||
Iterator<byte[]> i = this.urlHashDoubleCheck.iterator();
|
||||
ArrayList<String> deleteHashes = new ArrayList<String>();
|
||||
while (i.hasNext()) {
|
||||
String h = ASCII.String(i.next());
|
||||
if (hosthashes.contains(h.substring(6))) deleteHashes.add(h);
|
||||
}
|
||||
for (String h: deleteHashes) this.urlHashDoubleCheck.remove(ASCII.getBytes(h));
|
||||
return c;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int remove(final HandleSet urlHashes) throws IOException {
|
||||
Map<String, HandleSet> removeLists = new ConcurrentHashMap<String, HandleSet>();
|
||||
for (byte[] urlhash: urlHashes) {
|
||||
this.urlHashDoubleCheck.remove(urlhash);
|
||||
String hosthash = ASCII.String(urlhash, 6, 6);
|
||||
HandleSet removeList = removeLists.get(hosthash);
|
||||
if (removeList == null) {
|
||||
removeList = new RowHandleSet(Word.commonHashLength, Base64Order.enhancedCoder, 100);
|
||||
removeLists.put(hosthash, removeList);
|
||||
}
|
||||
try {removeList.put(urlhash);} catch (SpaceExceededException e) {}
|
||||
}
|
||||
int c = 0;
|
||||
for (Map.Entry<String, HandleSet> entry: removeLists.entrySet()) {
|
||||
HostQueue queue = this.queues.get(entry.getKey());
|
||||
if (queue != null) c += queue.remove(entry.getValue());
|
||||
}
|
||||
return c;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean has(final byte[] urlhashb) {
|
||||
if (this.urlHashDoubleCheck.has(urlhashb)) return true;
|
||||
String hosthash = ASCII.String(urlhashb, 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) return false;
|
||||
return queue.has(urlhashb);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
int c = 0;
|
||||
for (HostQueue queue: this.queues.values()) c += queue.size();
|
||||
return c;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEmpty() {
|
||||
for (HostQueue queue: this.queues.values()) if (!queue.isEmpty()) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* push a request to one of the host queues. If the queue does not exist, it is created
|
||||
* @param entry
|
||||
* @param profile
|
||||
* @param robots
|
||||
* @return null if everything is ok or a string with an error message if the push is not allowed according to the crawl profile or robots
|
||||
* @throws IOException
|
||||
* @throws SpaceExceededException
|
||||
*/
|
||||
@Override
|
||||
public synchronized String push(final Request entry, CrawlProfile profile, final RobotsTxt robots) throws IOException, SpaceExceededException {
|
||||
if (this.has(entry.url().hash())) return "double occurrence";
|
||||
this.urlHashDoubleCheck.put(entry.url().hash());
|
||||
String hosthash = ASCII.String(entry.url().hash(), 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) {
|
||||
queue = new HostQueue(this.hostsPath, entry.url().getHost(), entry.url().getPort(), this.queues.size() > 100, this.exceed134217727);
|
||||
this.queues.put(hosthash, queue);
|
||||
}
|
||||
return queue.push(entry, profile, robots);
|
||||
}
|
||||
|
||||
/**
|
||||
* get the next entry in this crawl queue in such a way that the domain access time delta is maximized
|
||||
* and always above the given minimum delay time. An additional delay time is computed using the robots.txt
|
||||
* crawl-delay time which is always respected. In case the minimum time cannot ensured, this method pauses
|
||||
* the necessary time until the url is released and returned as CrawlEntry object. In case that a profile
|
||||
* for the computed Entry does not exist, null is returned
|
||||
* @param delay true if the requester demands forced delays using explicit thread sleep
|
||||
* @param profile
|
||||
* @return a url in a CrawlEntry object
|
||||
* @throws IOException
|
||||
* @throws SpaceExceededException
|
||||
*/
|
||||
@Override
|
||||
public synchronized Request pop(boolean delay, CrawlSwitchboard cs, RobotsTxt robots) throws IOException {
|
||||
tryagain: while (true) try {
|
||||
if (this.roundRobinHostHashes.size() == 0) {
|
||||
// select all queues on the lowest crawldepth level; that means: first look for the lowest level
|
||||
int lowestCrawldepth = Integer.MAX_VALUE;
|
||||
for (HostQueue hq: this.queues.values()) {
|
||||
int lsd = hq.getLowestStackDepth();
|
||||
if (lsd < lowestCrawldepth) lowestCrawldepth = lsd;
|
||||
}
|
||||
// now add only such stacks which have the lowest level
|
||||
for (Map.Entry<String, HostQueue> entry: this.queues.entrySet()) {
|
||||
if (entry.getValue().getLowestStackDepth() == lowestCrawldepth) this.roundRobinHostHashes.add(entry.getKey());
|
||||
}
|
||||
// emergency case if this fails
|
||||
if (this.roundRobinHostHashes.size() == 0) {
|
||||
//assert this.queues.size() == 0; // thats the only case where that should happen
|
||||
this.roundRobinHostHashes.addAll(this.queues.keySet());
|
||||
}
|
||||
// if there are stacks with less than 10 entries, remove all stacks with more than 10 entries
|
||||
// this shall kick out small stacks to prevent that too many files are opened for very wide crawls
|
||||
boolean smallStacksExist = false;
|
||||
smallsearch: for (String s: this.roundRobinHostHashes) {
|
||||
HostQueue hq = this.queues.get(s);
|
||||
if (hq != null && hq.size() <= 10) {smallStacksExist = true; break smallsearch;}
|
||||
}
|
||||
if (smallStacksExist) {
|
||||
Iterator<String> i = this.roundRobinHostHashes.iterator();
|
||||
while (i.hasNext()) {
|
||||
String s = i.next();
|
||||
HostQueue hq = this.queues.get(s);
|
||||
if (hq != null && hq.size() > 10) {i.remove();}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (this.roundRobinHostHashes.size() == 0) return null;
|
||||
|
||||
// first strategy: get one entry which does not need sleep time
|
||||
for (String nextHH: this.roundRobinHostHashes) {
|
||||
HostQueue hq = this.queues.get(nextHH);
|
||||
int delta = Latency.waitingRemainingGuessed(hq.getHost(), DigestURL.hosthash(hq.getHost(), hq.getPort()), robots, ClientIdentification.yacyInternetCrawlerAgent);
|
||||
if (delta <= 10) {
|
||||
this.roundRobinHostHashes.remove(nextHH);
|
||||
Request request = hq == null ? null : hq.pop(delay, cs, robots);
|
||||
int size = hq == null ? 0 : hq.size();
|
||||
if (size == 0) {
|
||||
hq.close();
|
||||
this.queues.remove(nextHH);
|
||||
}
|
||||
if (request != null) return request;
|
||||
}
|
||||
}
|
||||
|
||||
// second strategy: take from the largest stack and clean round robin cache
|
||||
int largest = Integer.MIN_VALUE;
|
||||
String nextHH = null;
|
||||
for (String h: this.roundRobinHostHashes) {
|
||||
HostQueue hq = this.queues.get(h);
|
||||
if (hq != null) {
|
||||
int s = hq.size();
|
||||
if (s > largest) {
|
||||
largest = s;
|
||||
nextHH = h;
|
||||
}
|
||||
}
|
||||
}
|
||||
this.roundRobinHostHashes.clear(); // start from the beginning next time
|
||||
HostQueue hq = this.queues.get(nextHH);
|
||||
Request request = hq == null ? null : hq.pop(delay, cs, robots);
|
||||
if (hq != null && hq.size() == 0) {
|
||||
hq.close();
|
||||
this.queues.remove(nextHH);
|
||||
}
|
||||
return request;
|
||||
} catch (ConcurrentModificationException e) {
|
||||
continue tryagain;
|
||||
} catch (Throwable e) {
|
||||
throw new IOException(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<Request> iterator() throws IOException {
|
||||
final Iterator<HostQueue> hostsIterator = this.queues.values().iterator();
|
||||
@SuppressWarnings("unchecked")
|
||||
final Iterator<Request>[] hostIterator = new Iterator[1];
|
||||
hostIterator[0] = null;
|
||||
return new Iterator<Request>() {
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return hostsIterator.hasNext() || (hostIterator[0] != null && hostIterator[0].hasNext());
|
||||
}
|
||||
@Override
|
||||
public Request next() {
|
||||
synchronized (HostBalancer.this) {
|
||||
while (hostIterator[0] == null || !hostIterator[0].hasNext()) try {
|
||||
HostQueue entry = hostsIterator.next();
|
||||
hostIterator[0] = entry.iterator();
|
||||
} catch (IOException e) {}
|
||||
if (!hostIterator[0].hasNext()) return null;
|
||||
return hostIterator[0].next();
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public void remove() {
|
||||
hostIterator[0].remove();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* get a list of domains that are currently maintained as domain stacks
|
||||
* @return a map of clear text strings of host names to an integer array: {the size of the domain stack, guessed delta waiting time}
|
||||
*/
|
||||
@Override
|
||||
public Map<String, Integer[]> getDomainStackHosts(RobotsTxt robots) {
|
||||
Map<String, Integer[]> map = new TreeMap<String, Integer[]>(); // we use a tree map to get a stable ordering
|
||||
for (HostQueue hq: this.queues.values()) try {
|
||||
int delta = Latency.waitingRemainingGuessed(hq.getHost(), DigestURL.hosthash(hq.getHost(), hq.getPort()), robots, ClientIdentification.yacyInternetCrawlerAgent);
|
||||
map.put(hq.getHost() + ":" + hq.getPort(), new Integer[]{hq.size(), delta});
|
||||
} catch (MalformedURLException e) {
|
||||
ConcurrentLog.logException(e);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
/**
|
||||
* get lists of crawl request entries for a specific host
|
||||
* @param host
|
||||
* @param maxcount
|
||||
* @param maxtime
|
||||
* @return a list of crawl loader requests
|
||||
*/
|
||||
@Override
|
||||
public List<Request> getDomainStackReferences(String host, int maxcount, long maxtime) {
|
||||
try {
|
||||
HostQueue hq = this.queues.get(DigestURL.hosthash(host, host.startsWith("ftp.") ? 21 : 80));
|
||||
if (hq == null) hq = this.queues.get(DigestURL.hosthash(host, 443));
|
||||
return hq == null ? new ArrayList<Request>(0) : hq.getDomainStackReferences(host, maxcount, maxtime);
|
||||
} catch (MalformedURLException e) {
|
||||
ConcurrentLog.logException(e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,169 +0,0 @@
|
||||
/**
|
||||
* HostQueues
|
||||
* Copyright 2013 by Michael Christen
|
||||
* First released 24.09.2013 at http://yacy.net
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with this program in the file lgpl21.txt
|
||||
* If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
package net.yacy.crawler;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import net.yacy.cora.document.encoding.ASCII;
|
||||
import net.yacy.cora.order.Base64Order;
|
||||
import net.yacy.cora.storage.HandleSet;
|
||||
import net.yacy.cora.util.SpaceExceededException;
|
||||
import net.yacy.crawler.data.CrawlProfile;
|
||||
import net.yacy.crawler.retrieval.Request;
|
||||
import net.yacy.crawler.robots.RobotsTxt;
|
||||
import net.yacy.kelondro.data.word.Word;
|
||||
import net.yacy.kelondro.index.RowHandleSet;
|
||||
|
||||
/**
|
||||
* wrapper for single HostQueue queues; this is a collection of such queues.
|
||||
* All these queues are stored in a common directory for the queue stacks
|
||||
*/
|
||||
public class HostQueues {
|
||||
|
||||
private final File queuesPath;
|
||||
private final boolean useTailCache;
|
||||
private final boolean exceed134217727;
|
||||
private final Map<String, HostQueue> queues;
|
||||
|
||||
public HostQueues(
|
||||
final File queuesPath,
|
||||
final boolean useTailCache,
|
||||
final boolean exceed134217727) {
|
||||
this.queuesPath = queuesPath;
|
||||
this.useTailCache = useTailCache;
|
||||
this.exceed134217727 = exceed134217727;
|
||||
|
||||
// create a stack for newly entered entries
|
||||
if (!(queuesPath.exists())) queuesPath.mkdir(); // make the path
|
||||
this.queuesPath.mkdirs();
|
||||
this.queues = new HashMap<String, HostQueue>();
|
||||
String[] list = this.queuesPath.list();
|
||||
for (String queuefile: list) {
|
||||
if (queuefile.endsWith(HostQueue.indexSuffix)) {
|
||||
String hosthash = queuefile.substring(0, queuefile.length() - HostQueue.indexSuffix.length());
|
||||
HostQueue queue = new HostQueue(this.queuesPath, hosthash, this.useTailCache, this.exceed134217727);
|
||||
this.queues.put(hosthash, queue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void close() {
|
||||
for (HostQueue queue: this.queues.values()) queue.close();
|
||||
this.queues.clear();
|
||||
}
|
||||
|
||||
public void clear() {
|
||||
for (HostQueue queue: this.queues.values()) queue.clear();
|
||||
this.queues.clear();
|
||||
}
|
||||
|
||||
public Request get(final byte[] urlhash) throws IOException {
|
||||
String hosthash = ASCII.String(urlhash, 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) return null;
|
||||
return queue.get(urlhash);
|
||||
}
|
||||
|
||||
public int removeAllByProfileHandle(final String profileHandle, final long timeout) throws IOException, SpaceExceededException {
|
||||
int c = 0;
|
||||
for (HostQueue queue: this.queues.values()) c += queue.removeAllByProfileHandle(profileHandle, timeout);
|
||||
return c;
|
||||
}
|
||||
|
||||
public synchronized int remove(final HandleSet urlHashes) throws IOException {
|
||||
Map<String, HandleSet> removeLists = new HashMap<String, HandleSet>();
|
||||
for (byte[] urlhash: urlHashes) {
|
||||
String hosthash = ASCII.String(urlhash, 6, 6);
|
||||
HandleSet removeList = removeLists.get(hosthash);
|
||||
if (removeList == null) {
|
||||
removeList = new RowHandleSet(Word.commonHashLength, Base64Order.enhancedCoder, 100);
|
||||
removeLists.put(hosthash, removeList);
|
||||
}
|
||||
try {removeList.put(urlhash);} catch (SpaceExceededException e) {}
|
||||
}
|
||||
int c = 0;
|
||||
for (Map.Entry<String, HandleSet> entry: removeLists.entrySet()) {
|
||||
HostQueue queue = this.queues.get(entry.getKey());
|
||||
if (queue != null) c += queue.remove(entry.getValue());
|
||||
}
|
||||
return c;
|
||||
}
|
||||
|
||||
public boolean has(final byte[] urlhashb) {
|
||||
String hosthash = ASCII.String(urlhashb, 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) return false;
|
||||
return queue.has(urlhashb);
|
||||
}
|
||||
|
||||
public int size() {
|
||||
int c = 0;
|
||||
for (HostQueue queue: this.queues.values()) c += queue.size();
|
||||
return c;
|
||||
}
|
||||
|
||||
public boolean isEmpty() {
|
||||
for (HostQueue queue: this.queues.values()) if (!queue.isEmpty()) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* push a request to one of the host queues. If the queue does not exist, it is created
|
||||
* @param entry
|
||||
* @param profile
|
||||
* @param robots
|
||||
* @return null if everything is ok or a string with an error message if the push is not allowed according to the crawl profile or robots
|
||||
* @throws IOException
|
||||
* @throws SpaceExceededException
|
||||
*/
|
||||
public String push(final Request entry, CrawlProfile profile, final RobotsTxt robots) throws IOException, SpaceExceededException {
|
||||
String hosthash = ASCII.String(entry.url().hash(), 6, 6);
|
||||
HostQueue queue = this.queues.get(hosthash);
|
||||
if (queue == null) {
|
||||
queue = new HostQueue(this.queuesPath, hosthash, this.useTailCache, this.exceed134217727);
|
||||
this.queues.put(hosthash, queue);
|
||||
}
|
||||
return queue.push(entry, profile, robots);
|
||||
}
|
||||
|
||||
/**
|
||||
* remove one request from all stacks except from those as listed in notFromHost
|
||||
* @param notFromHost do not collect from these hosts
|
||||
* @return a list of requests
|
||||
* @throws IOException
|
||||
*/
|
||||
public List<Request> pop(Set<String> notFromHost) throws IOException {
|
||||
ArrayList<Request> requests = new ArrayList<Request>();
|
||||
for (Map.Entry<String, HostQueue> entry: this.queues.entrySet()) {
|
||||
if (notFromHost.contains(entry.getKey())) continue;
|
||||
Request r = entry.getValue().pop();
|
||||
if (r != null) requests.add(r);
|
||||
}
|
||||
return requests;
|
||||
}
|
||||
|
||||
}
|
@ -0,0 +1,420 @@
|
||||
/**
|
||||
* OnDemandOpenFileIndex
|
||||
* Copyright 2014 by Michael Christen
|
||||
* First released 16.04.2014 at http://yacy.net
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with this program in the file lgpl21.txt
|
||||
* If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
package net.yacy.kelondro.index;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import net.yacy.cora.order.CloneableIterator;
|
||||
import net.yacy.cora.util.ConcurrentLog;
|
||||
import net.yacy.cora.util.SpaceExceededException;
|
||||
import net.yacy.kelondro.index.Row.Entry;
|
||||
import net.yacy.kelondro.table.Table;
|
||||
import net.yacy.kelondro.util.kelondroException;
|
||||
|
||||
/**
|
||||
* a write buffer for ObjectIndex entries
|
||||
* @author Michael Peter Christen
|
||||
*
|
||||
*/
|
||||
public class OnDemandOpenFileIndex implements Index, Iterable<Row.Entry> {
|
||||
|
||||
private final File file;
|
||||
private final Row rowdef;
|
||||
private int sizecache;
|
||||
private final boolean exceed134217727;
|
||||
|
||||
public OnDemandOpenFileIndex(final File file, Row rowdef, final boolean exceed134217727) {
|
||||
this.file = file;
|
||||
this.rowdef = rowdef;
|
||||
this.exceed134217727 = exceed134217727;
|
||||
this.sizecache = -1;
|
||||
}
|
||||
|
||||
private Index getIndex() {
|
||||
try {
|
||||
return new Table(file, rowdef, 1000, 0, false, exceed134217727, false);
|
||||
} catch (kelondroException e) {
|
||||
ConcurrentLog.logException(e);
|
||||
return null;
|
||||
} catch (SpaceExceededException e) {
|
||||
ConcurrentLog.logException(e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized byte[] smallestKey() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
byte[] b = index.smallestKey();
|
||||
index.close();
|
||||
return b;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized byte[] largestKey() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
byte[] b = index.largestKey();
|
||||
index.close();
|
||||
return b;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void optimize() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return;
|
||||
index.optimize();
|
||||
index.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized long mem() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return 0;
|
||||
long l = index.mem();
|
||||
index.close();
|
||||
return l;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void addUnique(final Entry row) throws SpaceExceededException, IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return;
|
||||
try {
|
||||
index.addUnique(row);
|
||||
if (this.sizecache >= 0) this.sizecache++;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void clear() throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return;
|
||||
try {
|
||||
index.clear();
|
||||
this.sizecache = 0;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void close() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void deleteOnExit() {
|
||||
Index index = getIndex();
|
||||
index.deleteOnExit();
|
||||
index.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String filename() {
|
||||
return this.file.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized int size() {
|
||||
if (sizecache >= 0) return sizecache;
|
||||
Index index = getIndex();
|
||||
if (index == null) return 0;
|
||||
int i = index.size();
|
||||
index.close();
|
||||
this.sizecache = i;
|
||||
return i;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Entry get(final byte[] key, final boolean forcecopy) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
return index.get(key, forcecopy);
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Map<byte[], Row.Entry> get(final Collection<byte[]> keys, final boolean forcecopy) throws IOException, InterruptedException {
|
||||
final Map<byte[], Row.Entry> map = new TreeMap<byte[], Row.Entry>(row().objectOrder);
|
||||
Row.Entry entry;
|
||||
for (final byte[] key: keys) {
|
||||
entry = get(key, forcecopy);
|
||||
if (entry != null) map.put(key, entry);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean has(final byte[] key) {
|
||||
Index index = getIndex();
|
||||
if (index == null) return false;
|
||||
boolean b = index.has(key);
|
||||
index.close();
|
||||
return b;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean isEmpty() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return true;
|
||||
boolean b = index.isEmpty();
|
||||
if (b) this.sizecache = 0;
|
||||
index.close();
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds the row to the index. The row is identified by the primary key of the row.
|
||||
* @param row a index row
|
||||
* @return true if this set did _not_ already contain the given row.
|
||||
* @throws IOException
|
||||
* @throws SpaceExceededException
|
||||
*/
|
||||
@Override
|
||||
public synchronized boolean put(final Entry row) throws IOException, SpaceExceededException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return false;
|
||||
try {
|
||||
boolean b = index.put(row);
|
||||
if (this.sizecache >= 0 && b) this.sizecache++;
|
||||
return b;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Entry remove(final byte[] key) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
Entry e = index.remove(key);
|
||||
if (this.sizecache >= 0 && e != null) this.sizecache--;
|
||||
return e;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized boolean delete(final byte[] key) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return false;
|
||||
try {
|
||||
boolean b = index.delete(key);
|
||||
if (this.sizecache >= 0 && b) this.sizecache--;
|
||||
return b;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized List<RowCollection> removeDoubles() throws IOException, SpaceExceededException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
List<RowCollection> l = index.removeDoubles();
|
||||
this.sizecache = index.size();
|
||||
return l;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized List<Row.Entry> top(final int count) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
return index.top(count);
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized List<Row.Entry> random(final int count) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
return index.random(count);
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Entry removeOne() throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
Entry e = index.removeOne();
|
||||
if (this.sizecache >= 0 && e != null) this.sizecache--;
|
||||
return e;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Entry replace(final Entry row) throws SpaceExceededException, IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
Entry e = index.replace(row);
|
||||
if (this.sizecache >= 0 && e == null) this.sizecache++;
|
||||
return e;
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Row row() {
|
||||
return this.rowdef;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized CloneableIterator<byte[]> keys(final boolean up, final byte[] firstKey) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
try {
|
||||
return index.keys(up, firstKey);
|
||||
} catch (IOException e) {
|
||||
throw e;
|
||||
} finally {
|
||||
index.close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Iterator<Entry> iterator() {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
List<Entry> list = new ArrayList<Entry>();
|
||||
Iterator<Entry> i = index.iterator();
|
||||
while (i.hasNext()) list.add(i.next());
|
||||
index.close();
|
||||
return list.iterator();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized CloneableIterator<Entry> rows(final boolean up, final byte[] firstKey) throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
final List<Entry> list = new ArrayList<Entry>();
|
||||
final Iterator<Entry> i = index.rows(up, firstKey);
|
||||
while (i.hasNext()) list.add(i.next());
|
||||
index.close();
|
||||
final Iterator<Entry> li = list.iterator();
|
||||
return new CloneableIterator<Entry>(){
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return li.hasNext();
|
||||
}
|
||||
@Override
|
||||
public Entry next() {
|
||||
return li.next();
|
||||
}
|
||||
@Override
|
||||
public void remove() {
|
||||
li.remove();
|
||||
}
|
||||
@Override
|
||||
public CloneableIterator<Entry> clone(Object modifier) {
|
||||
return null;
|
||||
}
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized CloneableIterator<Entry> rows() throws IOException {
|
||||
Index index = getIndex();
|
||||
if (index == null) return null;
|
||||
final List<Entry> list = new ArrayList<Entry>();
|
||||
final Iterator<Entry> i = index.rows();
|
||||
while (i.hasNext()) list.add(i.next());
|
||||
index.close();
|
||||
final Iterator<Entry> li = list.iterator();
|
||||
return new CloneableIterator<Entry>(){
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return li.hasNext();
|
||||
}
|
||||
@Override
|
||||
public Entry next() {
|
||||
return li.next();
|
||||
}
|
||||
@Override
|
||||
public void remove() {
|
||||
li.remove();
|
||||
}
|
||||
@Override
|
||||
public CloneableIterator<Entry> clone(Object modifier) {
|
||||
return null;
|
||||
}
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
Loading…
Reference in new issue