removed cache-control from low- and medium-priority caches, which reduces memory use and computation overhead

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@774 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter 20 years ago
parent 5bf7d74114
commit 495bc8bec6

@@ -80,7 +80,7 @@
<form action="PerformanceMemory_p.html" method="post" enctype="multipart/form-data">
<tr class="TableHeader" valign="bottom">
<td class="small" rowspan="2">Database<br>&nbsp;</td>
<td class="small" rowspan="2">Chunk Size<br>(bytes)</td>
<td class="small" rowspan="2">Chunk Size<br>high/med/low<br>(bytes)</td>
<td class="small" colspan="5">#Slots</td>
<td class="small" colspan="5">Memory Occupation (KBytes)</td>
<td class="small" rowspan="2">Description<br>&nbsp;</td>
@@ -101,7 +101,7 @@
<tr class="TableCellDark">
<td class="small" align="left">RWI Assortment Cluster</td>
<td class="small" align="right">#[chunkRWI]#</td>
<td class="small" align="center">#[chunkRWI]#</td>
<td class="small" align="right">#[slreqRWI]#</td>
<td class="small" align="right">#[slempRWI]#</td>
<td class="small" align="right">#[slhigRWI]#</td>
@@ -119,7 +119,7 @@ cache will speed up crawls with a depth > 3.</td>
<tr class="TableCellDark">
<td class="small" align="left">HTTP Response Header</td>
<td class="small" align="right">#[chunkHTTP]#</td>
<td class="small" align="center">#[chunkHTTP]#</td>
<td class="small" align="right">#[slreqHTTP]#</td>
<td class="small" align="right">#[slempHTTP]#</td>
<td class="small" align="right">#[slhigHTTP]#</td>
@@ -137,7 +137,7 @@ Increasing this cache will be most important for a fast proxy mode.</td>
<tr class="TableCellDark">
<td class="small" align="left">'loaded' URLs</td>
<td class="small" align="right">#[chunkLURL]#</td>
<td class="small" align="center">#[chunkLURL]#</td>
<td class="small" align="right">#[slreqLURL]#</td>
<td class="small" align="right">#[slempLURL]#</td>
<td class="small" align="right">#[slhigLURL]#</td>
@@ -154,7 +154,7 @@ This cache is very important for a fast search process. Increasing the cache siz
<tr class="TableCellDark">
<td class="small" align="left">'noticed' URLs</td>
<td class="small" align="right">#[chunkNURL]#</td>
<td class="small" align="center">#[chunkNURL]#</td>
<td class="small" align="right">#[slreqNURL]#</td>
<td class="small" align="right">#[slempNURL]#</td>
<td class="small" align="right">#[slhigNURL]#</td>
@@ -171,7 +171,7 @@ Increasing the cache size will result in faster double-check during URL recognit
<tr class="TableCellDark">
<td class="small" align="left">'error' URLs</td>
<td class="small" align="right">#[chunkEURL]#</td>
<td class="small" align="center">#[chunkEURL]#</td>
<td class="small" align="right">#[slreqEURL]#</td>
<td class="small" align="right">#[slempEURL]#</td>
<td class="small" align="right">#[slhigEURL]#</td>
@@ -188,7 +188,7 @@ Increasing the cache size will most probably speed up crawling slightly, but not
<tr class="TableCellDark">
<td class="small" align="left">DHT Control</td>
<td class="small" align="right">#[chunkDHT]#</td>
<td class="small" align="center">#[chunkDHT]#</td>
<td class="small" align="right">#[slreqDHT]#</td>
<td class="small" align="right">#[slempDHT]#</td>
<td class="small" align="right">#[slhigDHT]#</td>
@@ -206,7 +206,7 @@ Increasing this cache may speed up many functions, but we need to test this to s
<tr class="TableCellDark">
<td class="small" align="left">Messages</td>
<td class="small" align="right">#[chunkMessage]#</td>
<td class="small" align="center">#[chunkMessage]#</td>
<td class="small" align="right">#[slreqMessage]#</td>
<td class="small" align="right">#[slempMessage]#</td>
<td class="small" align="right">#[slhigMessage]#</td>
@@ -222,7 +222,7 @@ Increasing this cache may speed up many functions, but we need to test this to s
<tr class="TableCellDark">
<td class="small" align="left">Wiki</td>
<td class="small" align="right">#[chunkWiki]#</td>
<td class="small" align="center">#[chunkWiki]#</td>
<td class="small" align="right">#[slreqWiki]#</td>
<td class="small" align="right">#[slempWiki]#</td>
<td class="small" align="right">#[slhigWiki]#</td>
@@ -240,7 +240,7 @@ Increasing this cache may speed up access to the wiki pages.</td>
<tr class="TableCellDark">
<td class="small" align="left">News</td>
<td class="small" align="right">#[chunkNews]#</td>
<td class="small" align="center">#[chunkNews]#</td>
<td class="small" align="right">#[slreqNews]#</td>
<td class="small" align="right">#[slempNews]#</td>
<td class="small" align="right">#[slhigNews]#</td>

@@ -63,8 +63,8 @@ public class PerformanceMemory_p {
private static final int MB = 1024 * KB;
private static Map defaultSettings = null;
private static int[] slt;
private static int req,chk,usd,bst,god;
private static int[] slt,chk;
private static int req,usd,bst,god;
private static long usedTotal, currTotal, dfltTotal, goodTotal, bestTotal;
@@ -210,12 +210,12 @@
}
private static void putprop(serverObjects prop, serverSwitch env, String db, String set) {
usd = chk * (slt[1]+slt[2]+slt[3]);
bst = (((chk * req) >> 10) + 1) << 10;
usd = chk[0]*slt[3] + chk[1]*slt[2] + chk[2]*slt[1];
bst = (((chk[2] * req) >> 10) + 1) << 10;
god = (((bst / (1+slt[1]+slt[2]+slt[3]) * slt[1]) >> 10) + 1) << 10;
if (set.equals("setGood")) env.setConfig("ramCache" + db, god);
if (set.equals("setBest")) env.setConfig("ramCache" + db, bst);
prop.put("chunk" + db, chk);
prop.put("chunk" + db, chk[2] + "/" + chk[1] + "/" + chk[0]);
prop.put("slreq" + db, req);
prop.put("slemp" + db, slt[0]);
prop.put("slhig" + db, slt[1]);

@@ -82,7 +82,7 @@ public class messageBoard {
return database.size();
}
public int dbCacheChunkSize() {
public int[] dbCacheChunkSize() {
return database.cacheChunkSize();
}

@@ -54,6 +54,7 @@ import java.util.TimeZone;
import de.anomic.kelondro.kelondroDyn;
import de.anomic.kelondro.kelondroMap;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.server.serverCodings;
public class wikiBoard {
@@ -92,8 +93,14 @@ public class wikiBoard {
return datbase.size() + bkpbase.size();
}
public int dbCacheChunkSize() {
return (datbase.cacheChunkSize() + bkpbase.cacheChunkSize()) / 2;
public int[] dbCacheChunkSize() {
int[] db = datbase.cacheChunkSize();
int[] bk = bkpbase.cacheChunkSize();
int[] i = new int[3];
i[kelondroRecords.CP_LOW] = (db[kelondroRecords.CP_LOW] + bk[kelondroRecords.CP_LOW]) / 2;
i[kelondroRecords.CP_MEDIUM] = (db[kelondroRecords.CP_MEDIUM] + bk[kelondroRecords.CP_MEDIUM]) / 2;
i[kelondroRecords.CP_HIGH] = (db[kelondroRecords.CP_HIGH] + bk[kelondroRecords.CP_HIGH]) / 2;
return i;
}
public int[] dbCacheFillStatus() {

@@ -133,7 +133,7 @@ public class kelondroMap {
return dyn.columnSize(0);
}
public int cacheChunkSize() {
public int[] cacheChunkSize() {
return dyn.cacheChunkSize();
}

@@ -80,18 +80,17 @@ public class kelondroRecords {
// constants
private static final int NUL = Integer.MIN_VALUE; // the meta value for the kelondroRecords' NUL abstraction
public static final long memBlock = 500000; // do not fill cache further if the amount of available memory is less than this
public static final long memKcolb = 10000000; // if the amount of available memory is greater than this, do not use cache size to block, simply use memory
private static final long memBlock = 500000; // do not fill cache further if the amount of available memory is less than this
// memory calculation
private static final int element_in_cache = 52;
private static final int cache_control_entry = 96;
// caching flags
protected static final int CP_NONE = -1; // cache priority none; entry shall not be cached
protected static final int CP_LOW = 0; // cache priority low; entry may be cached
protected static final int CP_MEDIUM = 1; // cache priority medium; entry shall be cached
protected static final int CP_HIGH = 2; // cache priority high; entry must be cached
public static final int CP_NONE = -1; // cache priority none; entry shall not be cached
public static final int CP_LOW = 0; // cache priority low; entry may be cached
public static final int CP_MEDIUM = 1; // cache priority medium; entry shall be cached
public static final int CP_HIGH = 2; // cache priority high; entry must be cached
// static seek pointers
private static long POS_MAGIC = 0; // 1 byte, byte: file type magic
@@ -144,7 +143,7 @@ public class kelondroRecords {
private HashMap[] XcacheHeaders; // the cache; holds overhead values and key element
private int XcacheSize; // number of cache records
private long XcacheStartup; // startup time; for cache aging
private kelondroMScoreCluster XcacheScore; // controls cache aging
private kelondroMScoreCluster cacheScore; // controls cache aging
public kelondroRecords(File file, long buffersize /* bytes */,
@@ -319,20 +318,28 @@ public class kelondroRecords {
if (buffersize <= 0) {
this.XcacheSize = 0;
this.XcacheHeaders = null;
this.XcacheScore = null;
this.cacheScore = null;
} else {
if ((buffersize / cacheChunkSize(false)) > size()) {
this.XcacheSize = (int) (buffersize / cacheChunkSize(false));
this.XcacheScore = null;
this.cacheScore = null; // no cache control because we have more cache slots than database entries
} else {
this.XcacheSize = (int) (buffersize / cacheChunkSize(true));
this.XcacheScore = new kelondroMScoreCluster();
this.cacheScore = new kelondroMScoreCluster(); // cache control of CP_HIGH caches
}
this.XcacheHeaders = new HashMap[]{new HashMap(), new HashMap(), new HashMap()};
}
this.XcacheStartup = System.currentTimeMillis();
}
private static final long max = Runtime.getRuntime().maxMemory();
private static final Runtime runtime = Runtime.getRuntime();
private static long availableMemory() {
// memory that is available, including increasing total memory up to the maximum
return max - runtime.totalMemory() + runtime.freeMemory();
}
public File file() {
if (filename == null) return null;
return new File(filename);
@@ -342,8 +349,16 @@
return this.headchunksize + element_in_cache + ((cacheControl) ? cache_control_entry : 0);
}
public int cacheChunkSize() {
return cacheChunkSize(this.XcacheScore != null);
public int[] cacheChunkSize() {
// returns three integers:
// #0: chunk size of CP_LOW - priority entries
// #1: chunk size of CP_MEDIUM - priority entries
// #2: chunk size of CP_HIGH - priority entries
int[] i = new int[3];
i[CP_LOW] = cacheChunkSize(false);
i[CP_MEDIUM] = cacheChunkSize(false);
i[CP_HIGH] = cacheChunkSize(this.cacheScore != null);
return i;
}
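On the caller side the new return value is indexed with the CP_* constants, which this commit widens from protected to public. A minimal consumer sketch (hypothetical helper name, mirroring the high/med/low string that PerformanceMemory_p now renders):

// Hypothetical consumer of the new cacheChunkSize() contract (not part of this commit):
static String formatChunkSizes(int[] chunk) {
    return chunk[kelondroRecords.CP_HIGH] + "/"   // tier kept under cache control
         + chunk[kelondroRecords.CP_MEDIUM] + "/" // cached without control overhead
         + chunk[kelondroRecords.CP_LOW];         // cached without control overhead
}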
public int[] cacheFillStatus() {
@@ -367,65 +382,100 @@
protected void deleteNode(Handle handle) throws IOException {
if (XcacheSize != 0) {
synchronized (XcacheHeaders) {
if (XcacheScore == null) {
XcacheHeaders[0].remove(handle);
XcacheHeaders[1].remove(handle);
XcacheHeaders[2].remove(handle);
} else if (XcacheHeaders[0].get(handle) != null) {
XcacheScore.deleteScore(handle);
XcacheHeaders[0].remove(handle);
} else if (XcacheHeaders[1].get(handle) != null) {
XcacheScore.deleteScore(handle);
XcacheHeaders[1].remove(handle);
} else if (XcacheHeaders[2].get(handle) != null) {
XcacheScore.deleteScore(handle);
XcacheHeaders[2].remove(handle);
if (cacheScore == null) {
XcacheHeaders[CP_LOW].remove(handle);
XcacheHeaders[CP_MEDIUM].remove(handle);
XcacheHeaders[CP_HIGH].remove(handle);
} else if (XcacheHeaders[CP_HIGH].get(handle) != null) {
// remove handle from cache-control
cacheScore.deleteScore(handle);
XcacheHeaders[CP_HIGH].remove(handle);
} else if (XcacheHeaders[CP_MEDIUM].get(handle) != null) {
// no cache control for medium-priority entries
XcacheHeaders[CP_MEDIUM].remove(handle);
} else if (XcacheHeaders[CP_LOW].get(handle) != null) {
// no cache control for low-priority entries
XcacheHeaders[CP_LOW].remove(handle);
}
}
}
dispose(handle);
}
private void checkCacheSpace(int forPriority) {
private boolean cacheSpace(int forPriority) {
// check for space in cache
// should only be called within a synchronized(XcacheHeaders) environment
if (XcacheSize == 0) return; // no caching
if (XcacheScore == null) return; // no cache control (== no deletion)
long free = Runtime.getRuntime().freeMemory();
long cs = XcacheHeaders[0].size() + XcacheHeaders[1].size() + XcacheHeaders[2].size();
if (cs == 0) return;
if ((cs < XcacheSize) && (free >= memBlock)) return; // no need to flush cache space
// returns true if it is allowed to add another entry to the cache
// returns false if the cache is considered to be full
if (forPriority == CP_NONE) return false;
if (XcacheSize == 0) return false; // no caching
long cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
if (cs == 0) return true; // nothing there to flush
if ((cs < XcacheSize) && (availableMemory() >= memBlock)) return true; // no need to flush cache space
Handle delkey;
// (free < memKcolb)) ??
// delete one entry
try {
delkey = (Handle) XcacheScore.getMinObject(); // error (see below) here
int cp = priorityOfCache(delkey);
if (cp <= forPriority) {
// deletion is ok, it affects an entry that has less priority
XcacheScore.deleteScore(delkey);
XcacheHeaders[cp].remove(delkey);
// delete one entry. distinguish between different priority cases:
if (forPriority == CP_LOW) {
// remove only from low-priority cache
if (this.XcacheHeaders[CP_LOW].size() != 0) {
// just delete any of the low-priority entries
delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
XcacheHeaders[CP_LOW].remove(delkey);
return true;
} else {
for (int i = 0; i < 3; i++) {
if (XcacheHeaders[i].size() > 0) {
delkey = (Handle) XcacheHeaders[i].keySet().iterator().next();
XcacheScore.deleteScore(delkey);
XcacheHeaders[i].remove(delkey);
break;
}
}
// we cannot delete any entry, therefore there is no space for another entry
return false;
}
} else if (forPriority == CP_MEDIUM) {
if (this.XcacheHeaders[CP_LOW].size() != 0) {
// just delete any of the low-priority entries
delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
XcacheHeaders[CP_LOW].remove(delkey);
return true;
} else if (this.XcacheHeaders[CP_MEDIUM].size() != 0) {
// just delete any of the medium-priority entries
delkey = (Handle) this.XcacheHeaders[CP_MEDIUM].keySet().iterator().next();
XcacheHeaders[CP_MEDIUM].remove(delkey);
return true;
} else {
// we cannot delete any entry, therefore there is no space for another entry
return false;
}
} else {
// request for a high-priority entry
if (this.XcacheHeaders[CP_LOW].size() != 0) {
// just delete any of the low-priority entries
delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
XcacheHeaders[CP_LOW].remove(delkey);
return true;
} else if (this.XcacheHeaders[CP_MEDIUM].size() != 0) {
// just delete any of the medium-priority entries
delkey = (Handle) this.XcacheHeaders[CP_MEDIUM].keySet().iterator().next();
XcacheHeaders[CP_MEDIUM].remove(delkey);
return true;
} else if (cacheScore == null) {
// no cache-control of high-priority cache
// the cache is considered as full
return false;
} else try {
// delete one from the high-priority entries
// use the cache-control to find the right object
delkey = (Handle) cacheScore.getMinObject();
cacheScore.deleteScore(delkey);
XcacheHeaders[CP_HIGH].remove(delkey);
return true;
} catch (NoSuchElementException e) {
// this is a strange error and could be caused by internal java problems
// we simply clear the cache
String error = "cachScore error: " + e.getMessage() + "; cachesize=" + XcacheSize + ", cache.size()=[" + XcacheHeaders[0].size() + "," + XcacheHeaders[1].size() + "," + XcacheHeaders[2].size() + "], cacheScore.size()=" + cacheScore.size();
this.cacheScore = new kelondroMScoreCluster();
this.XcacheHeaders[0] = new HashMap();
this.XcacheHeaders[1] = new HashMap();
this.XcacheHeaders[2] = new HashMap();
throw new kelondroException(filename, error);
}
} catch (NoSuchElementException e) {
System.out.println("strange kelondroRecords error: " + e.getMessage() + "; cachesize=" + XcacheSize + ", cache.size()=[" + XcacheHeaders[0].size() + "," + XcacheHeaders[1].size() + "," + XcacheHeaders[2].size() + "], cacheScore.size()=" + XcacheScore.size());
// this is a strange error and could be caused by internal java problems
// we simply clear the cache
this.XcacheScore = new kelondroMScoreCluster();
this.XcacheHeaders[0] = new HashMap();
this.XcacheHeaders[1] = new HashMap();
this.XcacheHeaders[2] = new HashMap();
}
delkey = null;
}
private int priorityOfCache(Handle handle) {
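Note the eviction order above: lower tiers are always sacrificed first, and only the CP_HIGH tier carries a score. The score is the entry's age in seconds since cache startup at its last touch, so getMinObject() returns the least recently touched high-priority entry. Condensed as hypothetical helpers (not part of this commit; the API calls are the ones used in this file):

// Condensed aging protocol of the CP_HIGH tier:
void touchHigh(kelondroMScoreCluster cacheScore, Handle h, long startup) {
    // refresh the age-based score on every store or cache hit
    cacheScore.setScore(h, (int) ((System.currentTimeMillis() - startup) / 1000));
}
Handle evictOldestHigh(kelondroMScoreCluster cacheScore) {
    // the smallest score marks the least recently touched entry
    Handle victim = (Handle) cacheScore.getMinObject();
    cacheScore.deleteScore(victim);
    return victim;
}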
@@ -538,11 +588,19 @@ public class kelondroRecords {
}
this.headChanged = false;
} else synchronized(XcacheHeaders) {
byte[] cacheEntry = (byte[]) XcacheHeaders[CP_HIGH].get(this.handle); // first try
if (cacheEntry == null) cacheEntry = (byte[]) XcacheHeaders[CP_MEDIUM].get(this.handle); // second try
if (cacheEntry == null) cacheEntry = (byte[]) XcacheHeaders[CP_LOW].get(this.handle); // third try
byte[] cacheEntry = null;
int cp = CP_HIGH;
cacheEntry = (byte[]) XcacheHeaders[CP_HIGH].get(this.handle); // first try
if (cacheEntry == null) {
cacheEntry = (byte[]) XcacheHeaders[CP_MEDIUM].get(this.handle); // second try
cp = CP_MEDIUM;
}
if (cacheEntry == null) {
cacheEntry = (byte[]) XcacheHeaders[CP_LOW].get(this.handle); // third try
cp = CP_LOW;
}
if (cacheEntry == null) {
// read overhead and key
// cache miss, we read overhead and key from file
//System.out.println("**CACHE miss for " + this.handle.index + "**");
this.headChunk = new byte[headchunksize];
//this.tailChunk = new byte[tailchunksize];
@@ -552,22 +610,28 @@
//entryFile.read(this.tailChunk, 0, this.tailChunk.length);
}
this.headChanged = true; // provoke a cache store
int cp = CP_HIGH;
cp = CP_HIGH;
if (OHHANDLEC == 3) {
Handle l = getOHHandle(1);
Handle r = getOHHandle(2);
if ((l == null) && (r == null)) cp = CP_LOW;
else if ((l == null) || (r == null)) cp = CP_MEDIUM;
}
checkCacheSpace(cp);
updateNodeCache(cp);
// if there is space left in the cache, copy these values to the cache
if (XcacheSize > 0) {
XcacheHeaders[CP_LOW].remove(this.handle);
XcacheHeaders[CP_MEDIUM].remove(this.handle);
XcacheHeaders[CP_HIGH].remove(this.handle);
}
if (cacheSpace(cp)) updateNodeCache(cp);
} else {
// cache hit, copy overhead and key from cache
//System.out.println("**CACHE HIT for " + this.handle.index + "**");
// copy cache entry
this.headChunk = new byte[headchunksize];
System.arraycopy(cacheEntry, 0, this.headChunk, 0, headchunksize);
// update cache scores to announce this cache hit
if (XcacheScore != null) XcacheScore.setScore(this.handle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
if ((cacheScore != null) && (cp == CP_HIGH)) {
cacheScore.setScore(this.handle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
}
this.headChanged = false;
}
}
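The read path above also shows where priorities come from: for records with three overhead handles (the kelondroTree node layout), a node whose left and right handles are both null, presumably a leaf, gets CP_LOW; one missing child gives CP_MEDIUM; inner nodes stay CP_HIGH. Restated as a hypothetical helper (the leaf interpretation is inferred from the tree layout, not stated by the commit):

// Hypothetical restatement of the priority decision in the read path:
int priorityFor(Handle left, Handle right) {
    if (left == null && right == null) return CP_LOW;    // no children: likely a leaf, may be cached
    if (left == null || right == null) return CP_MEDIUM; // one child: shall be cached
    return CP_HIGH;                                      // inner node: kept under cache control
}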
@@ -688,7 +752,12 @@ public class kelondroRecords {
//System.out.print("#write "); printChunk(this.handle, this.headChunk); System.out.println();
entryFile.write(this.headChunk);
}
updateNodeCache(cachePriority);
if (XcacheSize > 0) {
XcacheHeaders[CP_LOW].remove(this.handle);
XcacheHeaders[CP_MEDIUM].remove(this.handle);
XcacheHeaders[CP_HIGH].remove(this.handle);
}
if (cacheSpace(cachePriority)) updateNodeCache(cachePriority);
}
// save tail
@@ -739,13 +808,9 @@
if (priority == CP_NONE) return; // it is not wanted that this shall be cached
if (XcacheSize == 0) return; // we do not use the cache
int cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
if ((cs >= XcacheSize) && (XcacheScore == null)) return; // no cache update if cache is full and not controlled
if ((cs >= XcacheSize) && (priority == CP_LOW)) return; // no cache update if cache is full and new entry has low priority
if (cs >= XcacheSize) return; // no cache update if cache is full
synchronized (XcacheHeaders) {
// remember size to evaluate a cache size check need
int sizeBefore = cs;
//long memBefore = Runtime.getRuntime().freeMemory();
// generate cache entry
byte[] cacheEntry = new byte[headchunksize];
System.arraycopy(headChunk, 0, cacheEntry, 0, headchunksize);
@@ -757,16 +822,14 @@
if (priority != CP_MEDIUM) XcacheHeaders[CP_MEDIUM].remove(cacheHandle);
if (priority != CP_HIGH) XcacheHeaders[CP_HIGH].remove(cacheHandle);
XcacheHeaders[priority].put(cacheHandle, cacheEntry);
if (XcacheScore != null) XcacheScore.setScore(cacheHandle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
if ((cacheScore != null) && (priority == CP_HIGH)) {
cacheScore.setScore(cacheHandle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
}
// delete the cache entry buffer
cacheEntry = null;
cacheHandle = null;
//System.out.println("kelondroRecords cache4" + filename + ": cache record size = " + (memBefore - Runtime.getRuntime().freeMemory()) + " bytes" + ((newentry) ? " new" : ""));
// check cache size
cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
if (cs > sizeBefore) checkCacheSpace(priority);
//System.out.println("kelondroRecords cache4" + filename + ": " + XcacheHeaders.size() + " entries, " + XcacheSize + " allowed.");
//printCache();
}
}

@@ -257,8 +257,8 @@ public class kelondroStack extends kelondroRecords {
Handle h = getHandle(side);
if (h == null) return null;
if (dist >= size()) return null; // that would exceed the stack
while (dist-- > 0) h = getNode(h, null, 0).getOHHandle(dir); // track through elements
return getNode(h, null, 0);
while (dist-- > 0) h = getNode(h).getOHHandle(dir); // track through elements
return getNode(h);
}

@@ -1370,7 +1370,7 @@ public class kelondroTree extends kelondroRecords implements Comparator {
int steps = 0;
while (true) {
if (testFile.exists()) testFile.delete();
tt = new kelondroTree(testFile, 2000, 4 ,4);
tt = new kelondroTree(testFile, 200, 4 ,4);
steps = 10 + ((int) System.currentTimeMillis() % 7) * (((int) System.currentTimeMillis() + 17) % 11);
t = s;
d = "";

@@ -139,7 +139,7 @@ public final class plasmaHTCache {
return responseHeaderDB.size();
}
public int dbCacheChunkSize() {
public int[] dbCacheChunkSize() {
return responseHeaderDB.cacheChunkSize();
}

@@ -85,7 +85,7 @@ public final class plasmaWordIndex {
return ramCache.assortmentsSizes();
}
public int assortmentsCacheChunkSizeAvg() {
public int[] assortmentsCacheChunkSizeAvg() {
return ramCache.assortmentsCacheChunkSizeAvg();
}

@@ -148,10 +148,10 @@ public final class plasmaWordIndexAssortment {
try {
oldrow = assortments.put(row);
} catch (IOException e) {
log.logSevere("storeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("storeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
} catch (kelondroException e) {
log.logSevere("storeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("storeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
}
if (oldrow != null) throw new RuntimeException("Store to assortment ambiguous");
@@ -164,11 +164,11 @@ public final class plasmaWordIndexAssortment {
try {
row = assortments.remove(wordHash.getBytes());
} catch (IOException e) {
log.logSevere("removeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("removeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
return null;
} catch (kelondroException e) {
log.logSevere("removeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("removeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
return null;
}
@@ -199,11 +199,11 @@ public final class plasmaWordIndexAssortment {
try {
return assortments.keys(up, rot, startWordHash.getBytes());
} catch (IOException e) {
log.logSevere("iterateAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("iterateAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
return null;
} catch (kelondroException e) {
log.logSevere("iterateAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
log.logSevere("iterateAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
resetDatabase();
return null;
}
@@ -213,7 +213,7 @@ public final class plasmaWordIndexAssortment {
return assortments.size();
}
public int cacheChunkSize() {
public int[] cacheChunkSize() {
return assortments.cacheChunkSize();
}

@@ -50,6 +50,7 @@ import java.io.File;
import java.util.HashSet;
import java.util.Iterator;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.kelondro.kelondroMergeIterator;
import de.anomic.server.logging.serverLog;
@@ -191,10 +192,19 @@ public final class plasmaWordIndexAssortmentCluster {
return sizes;
}
public int cacheChunkSizeAvg() {
int total = 0;
for (int i = 0; i < clusterCount; i++) total += assortments[i].cacheChunkSize();
return total / clusterCount;
public int[] cacheChunkSizeAvg() {
int[] i = new int[]{0, 0, 0};
int[] a = new int[3];
for (int j = 0; j < clusterCount; j++) {
a = assortments[j].cacheChunkSize();
i[kelondroRecords.CP_LOW] += a[kelondroRecords.CP_LOW];
i[kelondroRecords.CP_MEDIUM] += a[kelondroRecords.CP_MEDIUM];
i[kelondroRecords.CP_HIGH] += a[kelondroRecords.CP_HIGH];
}
a[kelondroRecords.CP_LOW] = i[kelondroRecords.CP_LOW] / clusterCount;
a[kelondroRecords.CP_MEDIUM] = i[kelondroRecords.CP_MEDIUM] / clusterCount;
a[kelondroRecords.CP_HIGH] = i[kelondroRecords.CP_HIGH] / clusterCount;
return a;
}
public int[] cacheFillStatusCml() {

@@ -265,7 +265,7 @@ public final class plasmaWordIndexCache implements plasmaWordIndexInterface {
return assortmentCluster.sizes();
}
public int assortmentsCacheChunkSizeAvg() {
public int[] assortmentsCacheChunkSizeAvg() {
return assortmentCluster.cacheChunkSizeAvg();
}

@@ -0,0 +1,68 @@
// serverMemory.java
// -------------------------------------------
// (C) by Michael Peter Christen; mc@anomic.de
// first published on http://www.anomic.de
// Frankfurt, Germany, 2005
// Created 22.09.2005
//
// $LastChangedDate: 2005-09-21 16:21:45 +0200 (Wed, 21 Sep 2005) $
// $LastChangedRevision: 763 $
// $LastChangedBy: orbiter $
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// Using this software in any meaning (reading, learning, copying, compiling,
// running) means that you agree that the Author(s) is (are) not responsible
// for cost, loss of data or any harm that may be caused directly or indirectly
// by usage of this software or this documentation. The usage of this software
// is at your own risk. The installation and usage (starting/running) of this
// software may allow other people or applications to access your computer and
// any attached devices and is highly dependent on the configuration of the
// software which must be done by the user of the software; the author(s) is
// (are) also not responsible for proper configuration and usage of the
// software, even if provoked by documentation provided together with
// the software.
//
// Any changes to this file according to the GPL as documented in the file
// gpl.txt aside this file in the shipment you received can be done to the
// lines that follow this copyright notice here, but changes must not be
// done inside the copyright notice above. A re-distribution must contain
// the intact and unchanged copyright notice.
// Contributions and changes to the program code must be marked as such.
package de.anomic.server;
public class serverMemory {
public static final long max = Runtime.getRuntime().maxMemory();
private static final Runtime runtime = Runtime.getRuntime();
public static long free() {
// memory that is free without increasing the total memory taken from the OS
return runtime.freeMemory();
}
public static long available() {
// memory that is available, including increasing total memory up to the maximum
return max - runtime.totalMemory() + runtime.freeMemory();
}
public static long used() {
// memory that is currently bound in objects
return runtime.totalMemory() - runtime.freeMemory();
}
}
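A hypothetical usage sketch: with max = 64 MB, totalMemory() = 32 MB and freeMemory() = 8 MB, available() reports 64 - 32 + 8 = 40 MB, that is, the free heap plus the headroom the VM may still claim; kelondroRecords.cacheSpace() applies the same idea against its memBlock threshold:

// Hypothetical caller (not part of this commit); the 500000-byte threshold mirrors kelondroRecords.memBlock.
public class MemoryGuardSketch {
    public static void main(String[] args) {
        long headroom = serverMemory.available(); // free heap plus growth up to -Xmx
        if (headroom < 500000) {
            System.out.println("low memory: stop filling caches");
        } else {
            System.out.println("headroom: " + headroom + " bytes");
        }
    }
}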

@@ -89,7 +89,7 @@ public class yacyNewsDB {
news = createDB(path, bufferkb);
}
public int dbCacheChunkSize() {
public int[] dbCacheChunkSize() {
return news.cacheChunkSize();
}

@@ -103,7 +103,7 @@ public class yacyNewsPool {
return newsDB.size();
}
public int dbCacheChunkSize() {
public int[] dbCacheChunkSize() {
return newsDB.dbCacheChunkSize();
}

@@ -58,6 +58,7 @@ import de.anomic.kelondro.kelondroDyn;
import de.anomic.kelondro.kelondroException;
import de.anomic.kelondro.kelondroMScoreCluster;
import de.anomic.kelondro.kelondroMap;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.server.serverCore;
import de.anomic.server.serverSwitch;
@@ -148,8 +149,15 @@ public final class yacySeedDB {
} catch (IOException e) {}
}
public int dbCacheChunkSize() {
return (seedActiveDB.cacheChunkSize() + seedPassiveDB.cacheChunkSize() + seedPotentialDB.cacheChunkSize()) / 3;
public int[] dbCacheChunkSize() {
int[] ac = seedActiveDB.cacheChunkSize();
int[] pa = seedPassiveDB.cacheChunkSize();
int[] po = seedPotentialDB.cacheChunkSize();
int[] i = new int[3];
i[kelondroRecords.CP_LOW] = (ac[kelondroRecords.CP_LOW] + pa[kelondroRecords.CP_LOW] + po[kelondroRecords.CP_LOW]) / 3;
i[kelondroRecords.CP_MEDIUM] = (ac[kelondroRecords.CP_MEDIUM] + pa[kelondroRecords.CP_MEDIUM] + po[kelondroRecords.CP_MEDIUM]) / 3;
i[kelondroRecords.CP_HIGH] = (ac[kelondroRecords.CP_HIGH] + pa[kelondroRecords.CP_HIGH] + po[kelondroRecords.CP_HIGH]) / 3;
return i;
}
public int[] dbCacheFillStatus() {
