fixes to prevent too many open connections

- create fewer connections overall (smaller httpc connection pool size); see the configuration sketch below
- create fewer connections per host (2, the limit recommended by RFC 2616)
- do not start DHT distributions while too many connections are open
- close open/idle connections earlier; run the cleaner more often

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@6839 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter 15 years ago
parent a9b9bf667b
commit 7a05db0fcb
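As a reading aid for the first two commit-message items, here is a minimal sketch of the new pool limits, assuming Apache Commons HttpClient 3.x (the library YaCy's httpc Client wraps). Only the HttpClient/MultiThreadedHttpConnectionManager calls mirror the patch below; the class name and factory method are illustrative assumptions:

    import org.apache.commons.httpclient.HostConfiguration;
    import org.apache.commons.httpclient.HttpClient;
    import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;

    // Illustrative sketch (class and method names are assumptions): configure the
    // shared connection pool with the limits introduced by this commit.
    public final class ConnectionPoolSketch {
        public static HttpClient configuredClient() {
            final MultiThreadedHttpConnectionManager conManager = new MultiThreadedHttpConnectionManager();
            conManager.getParams().setMaxTotalConnections(50);         // at most 50 pooled connections overall
            conManager.getParams().setDefaultMaxConnectionsPerHost(2); // 2 per host, as RFC 2616 recommends
            conManager.getParams().setConnectionTimeout(60000);        // give up connecting after 60 s

            // loopback traffic is exempted from the strict per-host limit
            final HostConfiguration localhost = new HostConfiguration();
            localhost.setHost("localhost");
            conManager.getParams().setMaxConnectionsPerHost(localhost, 100);

            return new HttpClient(conManager);
        }
    }

The higher loopback limit presumably keeps local proxy and servlet traffic from being throttled by the strict external limit of 2.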

@@ -125,9 +125,11 @@ public class Client {
      */
 //      conManager.getParams().setDefaultMaxConnectionsPerHost(4); // default 2
         HostConfiguration localHostConfiguration = new HostConfiguration();
-        conManager.getParams().setMaxTotalConnections(200); // Proxy may need many connections
+        conManager.getParams().setMaxTotalConnections(50); // Proxy may need many connections
         conManager.getParams().setConnectionTimeout(60000); // set a default timeout
-        conManager.getParams().setDefaultMaxConnectionsPerHost(10);
+        conManager.getParams().setDefaultMaxConnectionsPerHost(2);
+        localHostConfiguration.setHost("0:0:0:0:0:0:0:1%0");
+        conManager.getParams().setMaxConnectionsPerHost(localHostConfiguration, 100);
         localHostConfiguration.setHost("localhost");
         conManager.getParams().setMaxConnectionsPerHost(localHostConfiguration, 100);
         localHostConfiguration.setHost("127.0.0.1");
@@ -150,13 +152,13 @@ public class Client {
      *
      * this is the time the method is callable, not the time it is called
      */
-    private static final int cleanupIntervall = 60000;
+    private static final int cleanupIntervall = 15000;
     /**
      * close connections when they are not used for this time
      *
      * or otherwise: hold connections this time open to reuse them
      */
-    private static final long closeConnectionsAfterMillis = 120000;
+    private static final long closeConnectionsAfterMillis = 12000;
     /**
      * time the last cleanup was started
      */
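The two constants changed above are only declarations; the cleanup routine itself is not part of this diff. A hedged sketch of how such a guard typically uses them together with MultiThreadedHttpConnectionManager (the field lastCleanup and the method name are assumptions):

    // Sketch only: lastCleanup and cleanupIfDue() are assumed names, not part of the patch.
    private static long lastCleanup = 0;

    private static synchronized void cleanupIfDue(final MultiThreadedHttpConnectionManager conManager) {
        final long now = System.currentTimeMillis();
        if (now - lastCleanup < cleanupIntervall) return;             // run at most every 15 s now
        lastCleanup = now;
        conManager.closeIdleConnections(closeConnectionsAfterMillis); // drop connections idle for more than 12 s
        conManager.deleteClosedConnections();                         // free the closed pool entries
    }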

@@ -1997,10 +1997,14 @@ public final class Switchboard extends serverSwitch {
             return false;
         }
         boolean hasDoneSomething = false;
-        if (this.dhtDispatcher.cloudSize() > this.peers.scheme.verticalPartitions() * 4) {
+        // accumulate RWIs to transmission cloud
+        if (this.dhtDispatcher.cloudSize() > this.peers.scheme.verticalPartitions() * 2) {
             log.logInfo("dhtTransferJob: no selection, too many entries in transmission cloud: " + this.dhtDispatcher.cloudSize());
         } else if (MemoryControl.available() < 1024*1024*25) {
-            log.logInfo("dhtTransferJob: no selection, too less memory available : " + (MemoryControl.available() / 1024 / 1024) + " MB");
+            log.logInfo("dhtTransferJob: no selection, too less memory available : " + (MemoryControl.available() / 1024 / 1024) + " MB");
+        } else if (Client.connectionCount() > 10) {
+            log.logInfo("dhtTransferJob: too many connections in httpc pool : " + Client.connectionCount());
         } else {
             byte[] startHash = null, limitHash = null;
             int tries = 10;
@@ -2025,8 +2029,12 @@ public final class Switchboard extends serverSwitch {
             hasDoneSomething = hasDoneSomething | enqueued;
             log.logInfo("dhtTransferJob: result from enqueueing: " + ((enqueued) ? "true" : "false"));
         }
+        // check if we can deliver entries to other peers
         if (this.dhtDispatcher.transmissionSize() >= 10) {
             log.logInfo("dhtTransferJob: no dequeueing from cloud to transmission: too many concurrent sessions: " + this.dhtDispatcher.transmissionSize());
+        } else if (Client.connectionCount() > 10) {
+            log.logInfo("dhtTransferJob: too many connections in httpc pool : " + Client.connectionCount());
         } else {
             boolean dequeued = this.dhtDispatcher.dequeueContainer();
             hasDoneSomething = hasDoneSomething | dequeued;
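Both Switchboard hunks add the same back-pressure check: DHT enqueueing and dequeueing are skipped while the httpc pool already holds more than 10 open connections. Condensed into one hypothetical helper (the helper itself is not in the patch; Client.connectionCount() and log are):

    // Hypothetical helper condensing the guard added in both hunks above.
    private boolean httpcPoolIdleEnough() {
        final int open = Client.connectionCount();
        if (open > 10) {
            log.logInfo("dhtTransferJob: too many connections in httpc pool : " + open);
            return false; // postpone DHT transmission until the pool drains
        }
        return true;
    }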
