Fix for a memory leak bug in the new DHT transmissions

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5606 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter 16 years ago
parent 2173865f92
commit be0c492ae5
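
Judging from the diff below, the leak came from the Dispatcher holding selected and splitted index containers in two long-lived fields (selectedContainerCache, splittedContainerCache); when a later step stalled or failed, those references were never dropped and the containers piled up on the heap. The fix removes both fields and folds selection, splitting and cloud-enqueueing into one synchronized method, so every intermediate list is a local variable that becomes garbage-collectable as soon as its partitions reach the transmission cloud. The following is a minimal, self-contained sketch of that pattern; it is illustrative only, with hypothetical names and trivial stand-in select/split logic, not the YaCy classes themselves.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentLinkedQueue;

// Sketch of the "no long-lived caches" pattern: select, split and enqueue in one
// synchronized step, keeping every intermediate list local. All names below are
// hypothetical; select() and split() are trivial stand-ins for the real logic.
public class LeakFreeDispatcherSketch {

    // stand-in for the transmission cloud: partitions waiting to be sent
    private final ConcurrentLinkedQueue<List<String>> transmissionCloud =
            new ConcurrentLinkedQueue<List<String>>();

    public int cloudSize() {
        return this.transmissionCloud.size();
    }

    // one-shot pipeline: nothing selected here outlives this method call
    public synchronized boolean selectSplitEnqueue(int maxCount, int partitions) {
        List<String> selected = select(maxCount);                   // cf. PROCESS(1)
        if (selected.isEmpty()) return false;
        List<List<String>> splitted = split(selected, partitions);  // cf. PROCESS(2)
        selected = null;                                            // drop the reference early, as the commit does
        if (splitted.size() != partitions) return false;
        for (List<String> partition : splitted) {                   // cf. PROCESS(3): enqueue to cloud
            if (!partition.isEmpty()) this.transmissionCloud.add(partition);
        }
        return true;
    }

    // hypothetical stand-in for the real backend selection
    private List<String> select(int maxCount) {
        List<String> result = new ArrayList<String>();
        for (int i = 0; i < maxCount; i++) result.add("container-" + i);
        return result;
    }

    // hypothetical stand-in for the vertical partitioning scheme
    private List<List<String>> split(List<String> containers, int partitions) {
        List<List<String>> result = new ArrayList<List<String>>();
        for (int i = 0; i < partitions; i++) result.add(new ArrayList<String>());
        for (int i = 0; i < containers.size(); i++) result.get(i % partitions).add(containers.get(i));
        return result;
    }

    public static void main(String[] args) {
        LeakFreeDispatcherSketch d = new LeakFreeDispatcherSketch();
        System.out.println("enqueued: " + d.selectSplitEnqueue(100, 4) + ", cloud size: " + d.cloudSize());
    }
}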

@@ -1913,29 +1913,38 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
             if (this.log.isFine()) log.logFine(rejectReason);
             return false;
         }
+        boolean hasDoneSomething = false;
+        if (this.dhtDispatcher.cloudSize() > this.webIndex.seedDB.scheme.verticalPartitions() * 4) {
+            log.logInfo("dhtTransferJob: no selection, too many entries in transmission cloud: " + this.dhtDispatcher.cloudSize());
+        } else if (MemoryControl.available() < 1024*1024*20) {
+            log.logInfo("dhtTransferJob: no selection, too less memory available : " + (MemoryControl.available() / 1024 / 1024) + " MB");
+        } else {
             String startHash = PeerSelection.selectTransferStart();
             log.logInfo("dhtTransferJob: selected " + startHash + " as start hash");
             String limitHash = PeerSelection.limitOver(this.webIndex.seedDB, startHash);
             log.logInfo("dhtTransferJob: selected " + limitHash + " as limit hash");
             try {
-                int c = this.dhtDispatcher.selectContainersToCache(
+                boolean enqueued = this.dhtDispatcher.selectContainersEnqueueToCloud(
                         startHash,
                         limitHash,
                         dhtMaxContainerCount,
                         dhtMaxReferenceCount,
                         2000);
-                log.logInfo("dhtTransferJob: Dispatcher selected " + c + " containers");
+                hasDoneSomething = hasDoneSomething | enqueued;
+                log.logInfo("dhtTransferJob: result from enqueueing: " + ((enqueued) ? "true" : "false"));
             } catch (IOException e) {
                 log.logSevere("dhtTransferJob: interrupted with exception: " + e.getMessage(), e);
                 return false;
             }
-        int splitted = this.dhtDispatcher.splitContainersFromCache();
-        log.logInfo("dhtTransferJob: splitted selected container in " + splitted + " parts");
-        boolean enqueued = this.dhtDispatcher.enqueueContainersFromCache();
-        log.logInfo("dhtTransferJob: result from enqueueing: " + ((enqueued) ? "true" : "false"));
-        boolean dequeued = this.dhtDispatcher.dequeueContainer();
-        log.logInfo("dhtTransferJob: result from dequeueing: " + ((dequeued) ? "true" : "false"));
-        return dequeued;
+        }
+        if (this.dhtDispatcher.transmissionSize() >= 10) {
+            log.logInfo("dhtTransferJob: no dequeueing from cloud to transmission: too many concurrent sessions: " + this.dhtDispatcher.transmissionSize());
+        } else {
+            boolean dequeued = this.dhtDispatcher.dequeueContainer();
+            hasDoneSomething = hasDoneSomething | dequeued;
+            log.logInfo("dhtTransferJob: result from dequeueing: " + ((dequeued) ? "true" : "false"));
+        }
+        return hasDoneSomething;
     }
     
     private void addURLtoErrorDB(
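
The plasmaSwitchboard hunk above also adds back-pressure to dhtTransferJob: selection is skipped while the transmission cloud is already full (more than verticalPartitions() * 4 entries) or less than 20 MB of memory is available, and dequeueing is skipped while 10 or more transmission sessions are in flight. A standalone sketch of that gating order follows; the DhtDispatcher interface, the constants and the use of Runtime.freeMemory() in place of MemoryControl.available() are stand-ins, not the actual YaCy API.

// Standalone illustration of the gating order in the new dhtTransferJob.
// The DhtDispatcher interface, the thresholds and the memory probe are
// hypothetical stand-ins, not the actual YaCy classes.
public class TransferJobGateSketch {

    interface DhtDispatcher {
        int cloudSize();
        int transmissionSize();
        boolean selectContainersEnqueueToCloud();
        boolean dequeueContainer();
    }

    static final int CLOUD_CAP = 4 * 4;                    // stands in for verticalPartitions() * 4
    static final long MIN_FREE_BYTES = 20L * 1024 * 1024;  // 20 MB threshold, as in the diff
    static final int MAX_CONCURRENT_TRANSMISSIONS = 10;

    static boolean transferJob(DhtDispatcher dht) {
        boolean hasDoneSomething = false;
        long free = Runtime.getRuntime().freeMemory();     // stand-in for MemoryControl.available()
        if (dht.cloudSize() > CLOUD_CAP) {
            // cloud already holds enough partitions: select nothing new
        } else if (free < MIN_FREE_BYTES) {
            // low memory: selecting more containers would only make things worse
        } else {
            hasDoneSomething |= dht.selectContainersEnqueueToCloud();
        }
        // dequeue a chunk for transmission only if not too many sessions run already
        if (dht.transmissionSize() < MAX_CONCURRENT_TRANSMISSIONS) {
            hasDoneSomething |= dht.dequeueContainer();
        }
        return hasDoneSomething;
    }
}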

@@ -92,11 +92,7 @@ public class Dispatcher {
     private serverProcessor<Transmission.Chunk> indexingTransmissionProcessor;
     
     // transmission object
-    private Transmission transmissions;
-    
-    // caching objects
-    private ArrayList<indexContainer> selectedContainerCache;
-    private ArrayList<indexContainer>[] splittedContainerCache;
+    private Transmission transmission;
     
     public Dispatcher(
             final indexRI backend,

@@ -109,15 +105,15 @@ public class Dispatcher {
         this.backend = backend;
         this.seeds = seeds;
         this.log = new Log("INDEX TRANSFER DISPATCHER");
-        this.transmissions = new Transmission(
+        this.transmission = new Transmission(
                 log,
                 repository,
                 seeds,
                 backend,
                 gzipBody,
                 timeout);
-        this.selectedContainerCache = null;
-        this.splittedContainerCache = null;
+        //this.selectedContainerCache = null;
+        //this.splittedContainerCache = null;
         
         int concurrentSender = Math.min(25, Math.max(10, serverProcessor.useCPU * 2 + 1));
         indexingTransmissionProcessor = new serverProcessor<Transmission.Chunk>(

@@ -127,6 +123,14 @@ public class Dispatcher {
                 this, "storeDocumentIndex", concurrentSender * 2, null, concurrentSender);
     }
     
+    public int cloudSize() {
+        return this.transmissionCloud.size();
+    }
+    
+    public int transmissionSize() {
+        return this.indexingTransmissionProcessor.queueSize();
+    }
+    
     /**
      * PROCESS(1)
      * select a number of index containers from the backend index.

@@ -138,7 +142,7 @@ public class Dispatcher {
      * @return
      * @throws IOException
      */
-    public ArrayList<indexContainer> selectContainers(
+    private ArrayList<indexContainer> selectContainers(
             final String hash,
             final String limitHash,
             final int maxContainerCount,

@@ -153,7 +157,7 @@ public class Dispatcher {
         return containers;
     }
     
-    public ArrayList<indexContainer> selectContainers(
+    private ArrayList<indexContainer> selectContainers(
             final String hash,
             final String limitHash,
             final int maxContainerCount,

@@ -190,30 +194,6 @@ public class Dispatcher {
         return containers;
     }
     
-    /**
-     * convenience method for the selection process: put the result in an internal cache
-     * @param hash
-     * @param limitHash
-     * @param maxContainerCount
-     * @param maxtime
-     * @return
-     * @throws IOException
-     */
-    public synchronized int selectContainersToCache(
-            final String hash,
-            final String limitHash,
-            final int maxContainerCount,
-            final int maxReferenceCount,
-            final int maxtime) throws IOException {
-        if (this.selectedContainerCache != null && this.selectedContainerCache.size() > 0) {
-            this.log.logInfo("selectContainersToCache: selectedContainerCache is already filled, no selection done.");
-            return 0;
-        }
-        this.selectedContainerCache = selectContainers(hash, limitHash, maxContainerCount, maxReferenceCount, maxtime);
-        this.log.logInfo("selectContainersToCache: selectedContainerCache was filled with " + this.selectedContainerCache.size() + " entries");
-        return this.selectedContainerCache.size();
-    }
-    
     /**
      * PROCESS(2)
      * split a list of containers into partitions according to the vertical distribution scheme

@@ -222,7 +202,7 @@ public class Dispatcher {
      * @return
      */
     @SuppressWarnings("unchecked")
-    public ArrayList<indexContainer>[] splitContainers(ArrayList<indexContainer> containers) {
+    private ArrayList<indexContainer>[] splitContainers(ArrayList<indexContainer> containers) {
        
        // init the result vector
        int partitionCount = this.seeds.scheme.verticalPartitions();

@@ -254,25 +234,6 @@ public class Dispatcher {
        return partitions;
    }
    
-    /**
-     * convenience method for splitContainers: use the container cache and write into a splitted container cache.
-     * @return
-     */
-    public synchronized int splitContainersFromCache() {
-        if (selectedContainerCache == null || selectedContainerCache.size() == 0) {
-            this.log.logInfo("splitContainersFromCache: selectedContainerCache is empty, cannot do anything here.");
-            return 0;
-        }
-        if (splittedContainerCache != null && splittedContainerCache.length > 0) {
-            this.log.logInfo("splitContainersFromCache: splittedContainerCache is aready filled, doing nothing now.");
-            return 0;
-        }
-        this.splittedContainerCache = splitContainers(selectedContainerCache);
-        this.selectedContainerCache = null;
-        this.log.logInfo("splitContainersFromCache: splittedContainerCache filled with " + this.splittedContainerCache.length + " partitions, deleting selectedContainerCache");
-        return this.splittedContainerCache.length;
-    }
-    
     /**
      * PROCESS(3) and PROCESS(4)
      * put containers into cloud. This needs information about the network,

@@ -283,7 +244,7 @@ public class Dispatcher {
      * stored in a cache of the Entry for later transmission to the targets, which means that
      * then no additional IO is necessary.
      */
-    public void enqueueContainers(final ArrayList<indexContainer>[] containers) {
+    private void enqueueContainersToCloud(final ArrayList<indexContainer>[] containers) {
        indexContainer lastContainer;
        String primaryTarget;
        Transmission.Chunk entry;

@@ -300,7 +261,7 @@ public class Dispatcher {
                    seeds.redundancy() * 3,
                    true);
            this.log.logInfo("enqueueContainers: selected " + targets.size() + " targets for primary target key " + primaryTarget + "/" + vertical + " with " + containers[vertical].size() + " index containers.");
-            if (entry == null) entry = transmissions.newChunk(primaryTarget, targets, lastContainer.row());
+            if (entry == null) entry = transmission.newChunk(primaryTarget, targets, lastContainer.row());
            
            // fill the entry with the containers
            for (indexContainer c: containers[vertical]) {

@@ -312,13 +273,34 @@ public class Dispatcher {
        }
    }
    
-    public boolean enqueueContainersFromCache() {
-        if (this.splittedContainerCache == null) {
-            this.log.logInfo("enqueueContainersFromCache: splittedContainerCache is empty, cannot do anything here.");
-            return false;
-        }
-        enqueueContainers(this.splittedContainerCache);
-        this.splittedContainerCache = null;
+    public synchronized boolean selectContainersEnqueueToCloud(
+            final String hash,
+            final String limitHash,
+            final int maxContainerCount,
+            final int maxReferenceCount,
+            final int maxtime) throws IOException {
+        ArrayList<indexContainer> selectedContainerCache = selectContainers(hash, limitHash, maxContainerCount, maxReferenceCount, maxtime);
+        this.log.logInfo("selectContainersToCache: selectedContainerCache was filled with " + selectedContainerCache.size() + " entries");
+        if (selectedContainerCache == null || selectedContainerCache.size() == 0) {
+            this.log.logInfo("splitContainersFromCache: selectedContainerCache is empty, cannot do anything here.");
+            return false;
+        }
+        ArrayList<indexContainer>[] splittedContainerCache = splitContainers(selectedContainerCache);
+        selectedContainerCache = null;
+        if (splittedContainerCache == null) {
+            this.log.logInfo("enqueueContainersFromCache: splittedContainerCache is empty, cannot do anything here.");
+            return false;
+        }
+        this.log.logInfo("splitContainersFromCache: splittedContainerCache filled with " + splittedContainerCache.length + " partitions, deleting selectedContainerCache");
+        if (splittedContainerCache.length != this.seeds.scheme.verticalPartitions()) {
+            this.log.logWarning("enqueueContainersFromCache: splittedContainerCache has wrong length.");
+            return false;
+        }
+        enqueueContainersToCloud(splittedContainerCache);
+        splittedContainerCache = null;
        this.log.logInfo("enqueueContainersFromCache: splittedContainerCache enqueued to cloud array which has now " + this.transmissionCloud.size() + " entries.");
        return true;
    }

@@ -360,7 +342,7 @@ public class Dispatcher {
            return chunk;
        }
        
-        this.log.logInfo("STORE: Chunk " + chunk.primaryTarget() + " has failed to transmit index; removed peer from network");
+        this.log.logInfo("STORE: Chunk " + chunk.primaryTarget() + " has failed to transmit index; marked peer as busy");
        
        if (chunk.canFinish()) {
            try {
