further refactoring of dht selection, transfer and flushing

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@1707 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 19 years ago
parent 73dad68cf1
commit 2c4e4ae6a2

@ -0,0 +1,281 @@
// plasmaDHTFlush.java
// ------------------------------
// part of YaCy
// (C) by Michael Peter Christen; mc@anomic.de
// first published on http://www.anomic.de
// Frankfurt, Germany, 2005, 2006
//
// This Class was written by Martin Thelian
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// Using this software in any meaning (reading, learning, copying, compiling,
// running) means that you agree that the Author(s) is (are) not responsible
// for cost, loss of data or any harm that may be caused directly or indirectly
// by usage of this software or this documentation. The usage of this software
// is on your own risk. The installation and usage (starting/running) of this
// software may allow other people or application to access your computer and
// any attached devices and is highly dependent on the configuration of the
// software which must be done by the user of the software; the author(s) is
// (are) also not responsible for proper configuration and usage of the
// software, even if provoked by documentation provided together with
// the software.
//
// Any changes to this file according to the GPL as documented in the file
// gpl.txt aside this file in the shipment you received can be done to the
// lines that follows this copyright notice here, but changes must not be
// done inside the copyright notice above. A re-distribution must contain
// the intact and unchanged copyright notice.
// Contributions and changes to the program code must be marked as such.
package de.anomic.plasma;
import de.anomic.server.logging.serverLog;
import de.anomic.yacy.yacySeed;
/**
 * Background thread that transfers the complete local word index to a single
 * target peer, chunk by chunk. Each chunk is selected from the local index,
 * handed to a {@link plasmaDHTTransfer} worker for upload, and (optionally)
 * deleted locally after a successful transfer. The chunk size is adapted
 * dynamically to balance selection time against transfer time.
 */
public class plasmaDHTFlush extends Thread {

    // target peer that receives the index
    private yacySeed seed = null;
    // if true, successfully transferred words are deleted from the local index
    private boolean delete = false;
    // termination flag: set by stopIt() or when the whole index was transferred
    private boolean finished = false;
    // transfer options handed to each plasmaDHTTransfer worker
    private boolean gzipBody4Transfer = false;
    private int timeout4Transfer = 60000;
    // statistics about transferred index entries / containers
    private int transferedEntryCount = 0;
    private int transferedContainerCount = 0;
    private String status = "Running";
    // hash range of the currently processed selection window
    private String oldStartingPointHash = "------------", startPointHash = "------------";
    // word-DB size at start; used for progress-percentage estimation
    private int initialWordsDBSize = 0;
    // entries per chunk; adapted at runtime between selection and transfer speed
    private int chunkSize = 500;
    private final long startingTime = System.currentTimeMillis();
    private final plasmaSwitchboard sb;
    // worker thread uploading the current chunk; null while no upload is running
    private plasmaDHTTransfer worker = null;
    private serverLog log;
    private plasmaWordIndex wordIndex;

    /**
     * Creates (but does not start) the whole-index transfer thread.
     *
     * @param log       logger for status and error messages
     * @param wordIndex local word index to select chunks from
     * @param seed      target peer receiving the index
     * @param delete    whether transferred words are removed from the local index
     * @param gzipBody  whether the transfer HTTP body is gzip-compressed
     * @param timeout   transfer timeout in milliseconds per chunk upload
     */
    public plasmaDHTFlush(serverLog log, plasmaWordIndex wordIndex, yacySeed seed, boolean delete, boolean gzipBody, int timeout) {
        super(new ThreadGroup("TransferIndexThreadGroup"), "TransferIndex_" + seed.getName());
        this.log = log;
        this.wordIndex = wordIndex;
        this.seed = seed;
        this.delete = delete;
        this.sb = plasmaSwitchboard.getSwitchboard();
        this.initialWordsDBSize = sb.wordIndex.size();
        this.gzipBody4Transfer = gzipBody;
        this.timeout4Transfer = timeout;
        //this.maxOpenFiles4Transfer = (int) sb.getConfigLong("indexTransfer.maxOpenFiles",800);
    }

    public void run() {
        performTransferWholeIndex();
    }

    /**
     * Signals the transfer loop to stop.
     *
     * @param wait if true, blocks until this thread has terminated
     */
    public void stopIt(boolean wait) throws InterruptedException {
        this.finished = true;
        if (wait) this.join();
    }

    public boolean isFinished() {
        return this.finished;
    }

    public boolean deleteIndex() {
        return this.delete;
    }

    /**
     * @return {configured chunk size, index count of the running chunk
     *         (or the default 500 when no worker is active)}
     */
    public int[] getIndexCount() {
        plasmaDHTTransfer workerThread = this.worker;
        if (workerThread != null) {
            return new int[]{this.chunkSize, workerThread.dhtChunk.indexCount()};
        }
        return new int[]{this.chunkSize, 500};
    }

    public int getTransferedEntryCount() {
        return this.transferedEntryCount;
    }

    public int getTransferedContainerCount() {
        return this.transferedContainerCount;
    }

    /**
     * Rough progress estimation in percent, based on the number of transferred
     * containers relative to the initial word-DB size.
     */
    public float getTransferedContainerPercent() {
        long currentWordsDBSize = sb.wordIndex.size();
        if (initialWordsDBSize == 0) return 100;
        // DB did not shrink (e.g. new words were added faster): no measurable progress
        else if (currentWordsDBSize >= initialWordsDBSize) return 0;
        //else return (float) ((initialWordsDBSize-currentWordsDBSize)/(initialWordsDBSize/100));
        // use float arithmetic; the original integer division truncated the percentage
        else return (this.transferedContainerCount * 100f) / initialWordsDBSize;
    }

    /** @return average transfer speed in index entries per second */
    public int getTransferedEntitySpeed() {
        long transferTime = System.currentTimeMillis() - startingTime;
        if (transferTime <= 0) transferTime = 1;
        return (int) ((1000 * transferedEntryCount) / transferTime);
    }

    public yacySeed getSeed() {
        return this.seed;
    }

    /**
     * @return {overall thread status, status message of the running chunk}
     */
    public String[] getStatus() {
        plasmaDHTTransfer workerThread = this.worker;
        if (workerThread != null) {
            return new String[]{this.status, workerThread.dhtChunk.getStatusMessage()};
        }
        return new String[]{this.status, "Not running"};
    }

    /**
     * @return {hash range of the selection window, hash range of the running chunk}
     */
    public String[] getRange() {
        plasmaDHTTransfer workerThread = this.worker;
        if (workerThread != null) {
            return new String[]{"[" + oldStartingPointHash + ".." + startPointHash + "]",
                "[" + workerThread.dhtChunk.firstContainer().hashCode() + ".." + workerThread.dhtChunk.lastContainer().hashCode() + "]"};
        }
        return new String[]{"[" + oldStartingPointHash + ".." + startPointHash + "]", "[------------..------------]"};
    }

    /**
     * Main transfer loop: repeatedly selects a chunk of the local word index,
     * waits for the previous chunk's upload to finish (adapting the chunk size
     * and optionally deleting the transferred words locally), then hands the
     * new chunk to an upload worker. Terminates when the whole index was
     * transferred, on user/shutdown interruption, or on a transfer failure.
     */
    public void performTransferWholeIndex() {
        plasmaDHTChunk newDHTChunk = null, oldDHTChunk = null;
        try {
            // pausing the regular index distribution
            // TODO: adding sync, to wait for a still running index distribution to finish
            //plasmaWordIndexDistribution.paused = true;

            // initial starting point of index transfer is "------------"
            log.logFine("Selected hash " + startPointHash + " as start point for index distribution of whole index");

            /* Loop until we have
             * - finished transfer of whole index
             * - detected a server shutdown or user interruption
             * - detected a failure
             */
            long selectionStart = System.currentTimeMillis(), selectionEnd = 0, selectionTime = 0, iteration = 0;
            while (!finished && !Thread.currentThread().isInterrupted()) {
                iteration++;
                selectionStart = System.currentTimeMillis();
                oldDHTChunk = newDHTChunk;

                // selecting 500 words to transfer
                this.status = "Running: Selecting chunk " + iteration;
                newDHTChunk = new plasmaDHTChunk(log, wordIndex, sb.urlPool.loadedURL, this.chunkSize / 3, this.chunkSize, this.startPointHash);

                /* If we havn't selected a word chunk this could be because of
                 * a) no words are left in the index
                 * b) max open file limit was exceeded
                 */
                if ((newDHTChunk == null) ||
                    (newDHTChunk.containerSize() == 0) ||
                    (newDHTChunk.getStatus() == plasmaDHTChunk.chunkStatus_FAILED)) {
                    if (sb.wordIndex.size() > 0) {
                        // if there are still words in the index we try it again now
                        startPointHash = "------------";
                    } else {
                        // otherwise we could end transfer now
                        log.logFine("No index available for index transfer, hash start-point " + startPointHash);
                        this.status = "Finished. " + iteration + " chunks transfered.";
                        finished = true;
                    }
                } else {
                    // getting start point for next DHT-selection
                    oldStartingPointHash = startPointHash;
                    startPointHash = newDHTChunk.lastContainer().wordHash(); // DHT targets must have greater hashes

                    selectionEnd = System.currentTimeMillis();
                    selectionTime = selectionEnd - selectionStart;
                    log.logInfo("Index selection of " + newDHTChunk.indexCount() + " words [" + newDHTChunk.firstContainer().wordHash() + " .. " + newDHTChunk.lastContainer().wordHash() + "]" +
                            " in " +
                            (selectionTime / 1000) + " seconds (" +
                            (1000 * newDHTChunk.indexCount() / (selectionTime + 1)) + " words/s)");
                }

                // query status of old worker thread
                if (worker != null) {
                    this.status = "Finished: Selecting chunk " + iteration;
                    worker.join();
                    if (worker.dhtChunk.getStatus() != plasmaDHTChunk.chunkStatus_COMPLETE) {
                        // if the transfer failed we abort index transfer now
                        // (use the human-readable message, not the numeric status code)
                        this.status = "Aborted because of Transfer error:\n" + worker.dhtChunk.getStatusMessage();

                        // abort index transfer
                        return;
                    } else {
                        /*
                         * If index transfer was done successfully we close all remaining open
                         * files that belong to the old index chunk and handover a new chunk
                         * to the transfer thread.
                         * Additionally we recalculate the chunk size to optimize performance
                         */
                        this.chunkSize = worker.dhtChunk.indexCount();
                        long transferTime = worker.getTransferTime();
                        //TODO: only increase chunk Size if there is free memory left on the server
                        // we need aprox. 73Byte per IndexEntity and an average URL length of 32 char
                        //if (ft.freeMemory() < 73*2*100)
                        if (transferTime > 60 * 1000) {
                            // transfer too slow: shrink chunk (but keep a sensible minimum)
                            if (chunkSize > 200) chunkSize -= 100;
                        } else if (selectionTime < transferTime) {
                            // transfer is the bottleneck: grow chunk to amortize overhead
                            this.chunkSize += 100;
                            //chunkSize+=50;
                        } else if (selectionTime >= transferTime) {
                            // selection is the bottleneck: shrink chunk
                            // (original code compared selectionTime with itself, which was always true)
                            if (chunkSize > 200) chunkSize -= 100;
                        }

                        selectionStart = System.currentTimeMillis();

                        // deleting transfered words from index
                        if (delete) {
                            this.status = "Running: Deleting chunk " + iteration;
                            transferedEntryCount += oldDHTChunk.indexCount();
                            transferedContainerCount += oldDHTChunk.containerSize();
                            int urlReferences = oldDHTChunk.deleteTransferIndexes();
                            log.logFine("Deleted from " + oldDHTChunk.containerSize() + " transferred RWIs locally " + urlReferences + " URL references");
                        } else {
                            transferedEntryCount += oldDHTChunk.indexCount();
                            transferedContainerCount += oldDHTChunk.containerSize();
                        }
                        oldDHTChunk = null;
                    }
                    this.worker = null;
                }

                // handover chunk to transfer worker
                // note: the null check must guard BOTH conditions; the original
                // precedence ((a && b) || c) could dereference a null chunk
                if ((newDHTChunk != null) &&
                    ((newDHTChunk.containerSize() > 0) ||
                     (newDHTChunk.getStatus() == plasmaDHTChunk.chunkStatus_FILLED))) {
                    worker = new plasmaDHTTransfer(log, seed, newDHTChunk, gzipBody4Transfer, timeout4Transfer, 5);
                    worker.start();
                }
            }

            // if we reach this point we were aborted by the user or by server shutdown
            if (sb.wordIndex.size() > 0) this.status = "aborted";
        } catch (Exception e) {
            this.status = "Error: " + e.getMessage();
            log.logWarning("Index transfer to peer " + seed.getName() + ":" + seed.hash + " failed:'" + e.getMessage() + "'", e);
        } finally {
            if (worker != null) {
                worker.stopIt();
                try {
                    worker.join();
                } catch (InterruptedException e) {
                    // restore the interrupt flag instead of silently swallowing it
                    Thread.currentThread().interrupt();
                }
                // worker = null;
            }
            //plasmaWordIndexDistribution.paused = false;
        }
    }
}

@ -3,8 +3,9 @@
// part of YaCy // part of YaCy
// (C) by Michael Peter Christen; mc@anomic.de // (C) by Michael Peter Christen; mc@anomic.de
// first published on http://www.anomic.de // first published on http://www.anomic.de
// Frankfurt, Germany, 2006 // Frankfurt, Germany, 2005, 2006
// created: 19.02.2006 //
// This class was provided by Martin Thelian
// //
// This program is free software; you can redistribute it and/or modify // This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by // it under the terms of the GNU General Public License as published by
@ -41,8 +42,6 @@
package de.anomic.plasma; package de.anomic.plasma;
import java.util.HashMap;
import de.anomic.server.logging.serverLog; import de.anomic.server.logging.serverLog;
import de.anomic.yacy.yacyClient; import de.anomic.yacy.yacyClient;
import de.anomic.yacy.yacyCore; import de.anomic.yacy.yacyCore;
@ -55,34 +54,27 @@ public class plasmaDHTTransfer extends Thread {
private int timeout4Transfer = 60000; private int timeout4Transfer = 60000;
// status fields // status fields
private boolean finished = false; private boolean stopped = false;
boolean success = false;
private long iteration = 0;
private long transferTime = 0; private long transferTime = 0;
// delivery destination // delivery destination
yacySeed seed = null; yacySeed seed = null;
// word chunk // word chunk
private String endPointHash;
private String startPointHash;
plasmaDHTChunk dhtChunk; plasmaDHTChunk dhtChunk;
// other fields // other fields
HashMap urlCache; private int maxRetry;
serverLog log; serverLog log;
public plasmaDHTTransfer(serverLog log, yacySeed seed, plasmaDHTChunk dhtChunk, public plasmaDHTTransfer(serverLog log, yacySeed seed, plasmaDHTChunk dhtChunk, boolean gzipBody, int timeout, int retries) {
boolean gzipBody, int timeout, long iteration, String endPointHash, String startPointHash) {
super(new ThreadGroup("TransferIndexThreadGroup"), "TransferIndexWorker_" + seed.getName()); super(new ThreadGroup("TransferIndexThreadGroup"), "TransferIndexWorker_" + seed.getName());
this.log = log; this.log = log;
this.gzipBody4Transfer = gzipBody; this.gzipBody4Transfer = gzipBody;
this.timeout4Transfer = timeout; this.timeout4Transfer = timeout;
this.iteration = iteration;
this.seed = seed; this.seed = seed;
this.dhtChunk = dhtChunk; this.dhtChunk = dhtChunk;
this.startPointHash = startPointHash; this.maxRetry = retries;
this.endPointHash = endPointHash;
} }
public void run() { public void run() {
@ -96,42 +88,22 @@ public class plasmaDHTTransfer extends Thread {
} }
} }
public int getStatus() {
return dhtChunk.getStatus();
}
public String getStatusMessage() {
return dhtChunk.getStatusMessage();
}
public boolean success() {
return this.success;
}
public int getIndexCount() {
return this.dhtChunk.indexCount();
}
private boolean isAborted() { private boolean isAborted() {
if (finished || Thread.currentThread().isInterrupted()) { if (stopped || Thread.currentThread().isInterrupted()) {
return true; return true;
} }
return false; return false;
} }
public void stopIt() { public void stopIt() {
this.finished = true; this.stopped = true;
}
public String getRange() {
return "[" + startPointHash + ".." + endPointHash + "]";
} }
public long getTransferTime() { public long getTransferTime() {
return this.transferTime; return this.transferTime;
} }
private void uploadIndex() throws InterruptedException { public void uploadIndex() throws InterruptedException {
/* loop until we /* loop until we
* - have successfully transfered the words list or * - have successfully transfered the words list or
@ -139,33 +111,30 @@ public class plasmaDHTTransfer extends Thread {
*/ */
long retryCount = 0, start = System.currentTimeMillis(); long retryCount = 0, start = System.currentTimeMillis();
while (true) { while (true) {
// testing if we wer aborted // testing if we were aborted
if (isAborted()) return; if (isAborted()) return;
// transfering seleted words to remote peer // transfering seleted words to remote peer
dhtChunk.setStatusMessage("Running: Transfering chunk " + iteration); dhtChunk.setStatusMessage("Running: Transfering chunk to target " + seed.hash + "/" + seed.getName());
dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_RUNNING); dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_RUNNING);
String error = yacyClient.transferIndex(seed, dhtChunk.containers(), urlCache, gzipBody4Transfer, timeout4Transfer); String error = yacyClient.transferIndex(seed, dhtChunk.containers(), dhtChunk.urlCacheMap(), gzipBody4Transfer, timeout4Transfer);
if (error == null) { if (error == null) {
// words successfully transfered // words successfully transfered
transferTime = System.currentTimeMillis() - start; transferTime = System.currentTimeMillis() - start;
this.log.logInfo("Index transfer of " + dhtChunk.indexCount() + " words [" + dhtChunk.firstContainer().wordHash() + " .. " + dhtChunk.lastContainer().wordHash() + "]" + " to peer " + seed.getName() + ":" + seed.hash + " in " + (transferTime / 1000) + " seconds successfull (" this.log.logInfo("Index transfer of " + dhtChunk.indexCount() + " words [" + dhtChunk.firstContainer().wordHash() + " .. " + dhtChunk.lastContainer().wordHash() + "]" + " to peer " + seed.getName() + ":" + seed.hash + " in " + (transferTime / 1000) + " seconds successfull ("
+ (1000 * dhtChunk.indexCount() / (transferTime + 1)) + " words/s)"); + (1000 * dhtChunk.indexCount() / (transferTime + 1)) + " words/s)");
retryCount = 0; retryCount = 0;
dhtChunk.setStatusMessage("Finished: Transfer of chunk to target " + seed.hash + "/" + seed.getName());
this.success = true;
dhtChunk.setStatusMessage("Finished: Transfer of chunk " + iteration);
dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_COMPLETE); dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_COMPLETE);
break; break;
} else { } else {
// worts transfer failed // words transfer failed
// inc retry counter // inc retry counter
retryCount++; retryCount++;
// testing if we were aborted ... // testing if we were aborted ...
if (isAborted()) if (isAborted()) return;
return;
// we have lost the connection to the remote peer. Adding peer to disconnected list // we have lost the connection to the remote peer. Adding peer to disconnected list
this.log.logWarning("Index transfer to peer " + seed.getName() + ":" + seed.hash + " failed:'" + error + "', disconnecting peer"); this.log.logWarning("Index transfer to peer " + seed.getName() + ":" + seed.hash + " failed:'" + error + "', disconnecting peer");
@ -173,7 +142,7 @@ public class plasmaDHTTransfer extends Thread {
// if the retry counter limit was not exceeded we'll retry it in a few seconds // if the retry counter limit was not exceeded we'll retry it in a few seconds
dhtChunk.setStatusMessage("Disconnected peer: " + ((retryCount > 5) ? error + ". Transfer aborted" : "Retry " + retryCount)); dhtChunk.setStatusMessage("Disconnected peer: " + ((retryCount > 5) ? error + ". Transfer aborted" : "Retry " + retryCount));
if (retryCount > 5) { if (retryCount > maxRetry) {
dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_FAILED); dhtChunk.setStatus(plasmaDHTChunk.chunkStatus_FAILED);
return; return;
} }
@ -194,7 +163,7 @@ public class plasmaDHTTransfer extends Thread {
// inc. retry counter // inc. retry counter
retryCount++; retryCount++;
dhtChunk.setStatusMessage("Disconnected peer: Peer ping failed. " + ((retryCount > 5) ? "Transfer aborted." : "Retry " + retryCount)); dhtChunk.setStatusMessage("Disconnected peer: Peer ping failed. " + ((retryCount > 5) ? "Transfer aborted." : "Retry " + retryCount));
if (retryCount > 5) return; if (retryCount > maxRetry) return;
Thread.sleep(retryCount * 5000); Thread.sleep(retryCount * 5000);
continue; continue;
} else { } else {

@ -1909,7 +1909,9 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
public void startTransferWholeIndex(yacySeed seed, boolean delete) { public void startTransferWholeIndex(yacySeed seed, boolean delete) {
if (transferIdxThread == null) { if (transferIdxThread == null) {
this.transferIdxThread = new plasmaDHTFlush(this.log, this.wordIndex, seed, delete); this.transferIdxThread = new plasmaDHTFlush(this.log, this.wordIndex, seed, delete,
"true".equalsIgnoreCase(getConfig("indexTransfer.gzipBody","false")),
(int) getConfigLong("indexTransfer.timeout",60000));
this.transferIdxThread.start(); this.transferIdxThread.start();
} }
} }

@ -337,13 +337,14 @@ public final class plasmaWordIndex {
} }
public synchronized int removeEntries(String wordHash, String[] urlHashes, boolean deleteComplete) { public synchronized int removeEntries(String wordHash, String[] urlHashes, boolean deleteComplete) {
int removed = 0; int removed = ramCache.removeEntries(wordHash, urlHashes, deleteComplete);
removed += ramCache.removeEntries(wordHash, urlHashes, deleteComplete); if (removed == urlHashes.length) return removed;
plasmaWordIndexEntryContainer container = assortmentCluster.removeFromAll(wordHash, -1); plasmaWordIndexEntryContainer container = assortmentCluster.removeFromAll(wordHash, -1);
if (container != null) { if (container != null) {
removed += container.removeEntries(wordHash, urlHashes, deleteComplete); removed += container.removeEntries(wordHash, urlHashes, deleteComplete);
if (container.size() != 0) this.addEntries(container, System.currentTimeMillis(), false); if (container.size() != 0) this.addEntries(container, System.currentTimeMillis(), false);
} }
if (removed == urlHashes.length) return removed;
removed += backend.removeEntries(wordHash, urlHashes, deleteComplete); removed += backend.removeEntries(wordHash, urlHashes, deleteComplete);
return removed; return removed;
} }

@ -43,11 +43,8 @@
package de.anomic.plasma; package de.anomic.plasma;
import java.util.Enumeration;
import de.anomic.yacy.yacyCore; import de.anomic.yacy.yacyCore;
import de.anomic.yacy.yacySeed; import de.anomic.yacy.yacySeed;
import de.anomic.yacy.yacyClient;
import de.anomic.yacy.yacyDHTAction;
import de.anomic.server.logging.serverLog; import de.anomic.server.logging.serverLog;
public final class plasmaWordIndexDistribution { public final class plasmaWordIndexDistribution {
@ -190,60 +187,29 @@ public final class plasmaWordIndexDistribution {
plasmaDHTChunk dhtChunk = new plasmaDHTChunk(this.log, this.wordIndex, this.urlPool.loadedURL, 30, indexCount); plasmaDHTChunk dhtChunk = new plasmaDHTChunk(this.log, this.wordIndex, this.urlPool.loadedURL, 30, indexCount);
try { try {
// find start point for DHT-selection
String keyhash = dhtChunk.lastContainer().wordHash(); // DHT targets must have greater hashes
// find a list of DHT-peers // find a list of DHT-peers
yacySeed[] seeds = new yacySeed[peerCount + 10]; yacySeed[] seeds = yacyCore.dhtAgent.getDHTTargets(log, peerCount, 10, dhtChunk.firstContainer().wordHash(), dhtChunk.lastContainer().wordHash(), 0.4);
int hc0 = 0;
double ownDistance = Math.min(yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed.hash, dhtChunk.firstContainer().wordHash()), yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed.hash, dhtChunk.lastContainer().wordHash()));
double maxDistance = Math.min(ownDistance, 0.4);
synchronized (yacyCore.dhtAgent) {
double avdist;
Enumeration e = yacyCore.dhtAgent.getAcceptRemoteIndexSeeds(keyhash);
while ((e.hasMoreElements()) && (hc0 < seeds.length)) {
seeds[hc0] = (yacySeed) e.nextElement();
if (seeds[hc0] != null) {
avdist = Math.max(yacyDHTAction.dhtDistance(seeds[hc0].hash, dhtChunk.firstContainer().wordHash()), yacyDHTAction.dhtDistance(seeds[hc0].hash, dhtChunk.lastContainer().wordHash()));
if (avdist < maxDistance) {
log.logInfo("Selected " + ((hc0 < peerCount) ? "primary" : "reserve") + " DHT target peer " + seeds[hc0].getName() + ":" + seeds[hc0].hash + ", distance = " + avdist);
hc0++;
}
}
}
e = null; // finish enumeration
}
if (hc0 < peerCount) { if (seeds.length < peerCount) {
log.logWarning("found not enough (" + hc0 + ") peers for distribution"); log.logWarning("found not enough (" + seeds.length + ") peers for distribution");
return -1; return -1;
} }
// send away the indexes to all these indexes // send away the indexes to all these indexes
String error;
String peerNames = ""; String peerNames = "";
long start;
int hc1 = 0; int hc1 = 0;
for (int i = 0; i < hc0; i++) { plasmaDHTTransfer transfer = null;
for (int i = 0; i < seeds.length; i++) {
if (this.isClosed()) { if (this.isClosed()) {
this.log.logSevere("Index distribution interrupted by close, nothing deleted locally."); this.log.logSevere("Index distribution interrupted by close, nothing deleted locally.");
return -1; // interrupted return -1; // interrupted
} }
start = System.currentTimeMillis(); transfer = new plasmaDHTTransfer(log, seeds[i], dhtChunk, this.gzipBody4Distribution, this.timeout4Distribution, 0);
error = yacyClient.transferIndex( try {transfer.uploadIndex();} catch (InterruptedException e) {}
seeds[i],
dhtChunk.containers(), if (transfer.dhtChunk.getStatus() == plasmaDHTChunk.chunkStatus_COMPLETE) {
dhtChunk.urlCacheMap(),
this.gzipBody4Distribution,
this.timeout4Distribution);
if (error == null) {
this.log.logInfo("Index transfer of " + indexCount + " words [" + dhtChunk.firstContainer().wordHash() + " .. " + dhtChunk.lastContainer().wordHash() + "] to peer " + seeds[i].getName() + ":" + seeds[i].hash + " in " + ((System.currentTimeMillis() - start) / 1000)
+ " seconds successfull (" + (1000 * indexCount / (System.currentTimeMillis() - start + 1)) + " words/s)");
peerNames += ", " + seeds[i].getName(); peerNames += ", " + seeds[i].getName();
hc1++; hc1++;
} else {
this.log.logWarning("Index transfer to peer " + seeds[i].getName() + ":" + seeds[i].hash + " failed:'" + error + "', disconnecting peer");
yacyCore.peerActions.peerDeparture(seeds[i]);
} }
if (hc1 >= peerCount) break; if (hc1 >= peerCount) break;
} }

@ -44,6 +44,7 @@ import java.util.Enumeration;
import de.anomic.kelondro.kelondroException; import de.anomic.kelondro.kelondroException;
import de.anomic.kelondro.kelondroMScoreCluster; import de.anomic.kelondro.kelondroMScoreCluster;
import de.anomic.server.logging.serverLog;
public class yacyDHTAction implements yacyPeerAction { public class yacyDHTAction implements yacyPeerAction {
@ -260,4 +261,34 @@ public class yacyDHTAction implements yacyPeerAction {
else else
return ((double) (((byte) from) - ((byte) to))) / maxAtomarDistance; return ((double) (((byte) from) - ((byte) to))) / maxAtomarDistance;
} }
public synchronized yacySeed[] getDHTTargets(serverLog log, int primaryPeerCount, int reservePeerCount, String firstKey, String lastKey, double maxDist) {
// find a list of DHT-peers
yacySeed[] seeds = new yacySeed[primaryPeerCount + reservePeerCount];
int hc0 = 0;
double ownDistance = Math.min(yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed.hash, firstKey), yacyDHTAction.dhtDistance(yacyCore.seedDB.mySeed.hash, lastKey));
double maxDistance = Math.min(ownDistance, maxDist);
double avdist;
Enumeration e = this.getAcceptRemoteIndexSeeds(lastKey);
while ((e.hasMoreElements()) && (hc0 < seeds.length)) {
seeds[hc0] = (yacySeed) e.nextElement();
if (seeds[hc0] != null) {
avdist = Math.max(yacyDHTAction.dhtDistance(seeds[hc0].hash, firstKey), yacyDHTAction.dhtDistance(seeds[hc0].hash, lastKey));
if (avdist < maxDistance) {
if (log != null) log.logInfo("Selected " + ((hc0 < primaryPeerCount) ? "primary" : "reserve") + " DHT target peer " + seeds[hc0].getName() + ":" + seeds[hc0].hash + ", distance = " + avdist);
hc0++;
}
}
}
e = null; // finish enumeration
if (hc0 == seeds.length) {
return seeds;
} else {
yacySeed[] s = new yacySeed[hc0];
System.arraycopy(seeds, 0, s, 0, hc0);
return s;
}
}
} }

Loading…
Cancel
Save