diff --git a/htroot/PerformanceMemory_p.html b/htroot/PerformanceMemory_p.html
index 735a77430..a9cf922d9 100644
--- a/htroot/PerformanceMemory_p.html
+++ b/htroot/PerformanceMemory_p.html
@@ -80,7 +80,7 @@
 Database
-Chunk Size<br>(bytes)
+Chunk Size<br>high/med/low<br>(bytes)
 #Slots
 Memory Occupation (KBytes)
 Description
@@ -101,7 +101,7 @@
 RWI Assortment Cluster
-#[chunkRWI]#
+#[chunkRWI]#
 #[slreqRWI]#
 #[slempRWI]#
 #[slhigRWI]#
@@ -119,7 +119,7 @@ cache will speed up crawls with a depth > 3.
 HTTP Response Header
-#[chunkHTTP]#
+#[chunkHTTP]#
 #[slreqHTTP]#
 #[slempHTTP]#
 #[slhigHTTP]#
@@ -137,7 +137,7 @@ Increasing this cache will be most important for a fast proxy mode.
 'loaded' URLs
-#[chunkLURL]#
+#[chunkLURL]#
 #[slreqLURL]#
 #[slempLURL]#
 #[slhigLURL]#
@@ -154,7 +154,7 @@ This cache is very important for a fast search process. Increasing the cache siz
 'noticed' URLs
-#[chunkNURL]#
+#[chunkNURL]#
 #[slreqNURL]#
 #[slempNURL]#
 #[slhigNURL]#
@@ -171,7 +171,7 @@ Increasing the cache size will result in faster double-check during URL recognit
 'error' URLs
-#[chunkEURL]#
+#[chunkEURL]#
 #[slreqEURL]#
 #[slempEURL]#
 #[slhigEURL]#
@@ -188,7 +188,7 @@ Increasing the cache size will most probably speed up crawling slightly, but not
 DHT Control
-#[chunkDHT]#
+#[chunkDHT]#
 #[slreqDHT]#
 #[slempDHT]#
 #[slhigDHT]#
@@ -206,7 +206,7 @@ Increasing this cache may speed up many functions, but we need to test this to s
 Messages
-#[chunkMessage]#
+#[chunkMessage]#
 #[slreqMessage]#
 #[slempMessage]#
 #[slhigMessage]#
@@ -222,7 +222,7 @@ Increasing this cache may speed up many functions, but we need to test this to s
 Wiki
-#[chunkWiki]#
+#[chunkWiki]#
 #[slreqWiki]#
 #[slempWiki]#
 #[slhigWiki]#
@@ -240,7 +240,7 @@ Increasing this cache may speed up access to the wiki pages.
 News
-#[chunkNews]#
+#[chunkNews]#
 #[slreqNews]#
 #[slempNews]#
 #[slhigNews]#
diff --git a/htroot/PerformanceMemory_p.java b/htroot/PerformanceMemory_p.java
index 5f67f408e..36a279007 100644
--- a/htroot/PerformanceMemory_p.java
+++ b/htroot/PerformanceMemory_p.java
@@ -63,8 +63,8 @@ public class PerformanceMemory_p {
     private static final int MB = 1024 * KB;
 
     private static Map defaultSettings = null;
-    private static int[] slt;
-    private static int req,chk,usd,bst,god;
+    private static int[] slt,chk;
+    private static int req,usd,bst,god;
 
     private static long usedTotal, currTotal, dfltTotal, goodTotal, bestTotal;
@@ -210,12 +210,12 @@ public class PerformanceMemory_p {
     }
 
     private static void putprop(serverObjects prop, serverSwitch env, String db, String set) {
-        usd = chk * (slt[1]+slt[2]+slt[3]);
-        bst = (((chk * req) >> 10) + 1) << 10;
+        usd = chk[0]*slt[3] + chk[1]*slt[2] + chk[2]*slt[1];
+        bst = (((chk[2] * req) >> 10) + 1) << 10;
         god = (((bst / (1+slt[1]+slt[2]+slt[3]) * slt[1]) >> 10) + 1) << 10;
         if (set.equals("setGood")) env.setConfig("ramCache" + db, god);
         if (set.equals("setBest")) env.setConfig("ramCache" + db, bst);
-        prop.put("chunk" + db, chk);
+        prop.put("chunk" + db, chk[2] + "/" + chk[1] + "/" + chk[0]);
        prop.put("slreq" + db, req);
         prop.put("slemp" + db, slt[0]);
         prop.put("slhig" + db, slt[1]);
diff --git a/source/de/anomic/data/messageBoard.java b/source/de/anomic/data/messageBoard.java
index e0d2bb45d..741b57bbd 100644
--- a/source/de/anomic/data/messageBoard.java
+++ b/source/de/anomic/data/messageBoard.java
@@ -82,7 +82,7 @@ public class messageBoard {
         return database.size();
     }
 
-    public int dbCacheChunkSize() {
+    public int[] dbCacheChunkSize() {
         return database.cacheChunkSize();
     }
 
diff --git a/source/de/anomic/data/wikiBoard.java b/source/de/anomic/data/wikiBoard.java
index 01fa1193a..3004099bd 100644
--- a/source/de/anomic/data/wikiBoard.java
+++ b/source/de/anomic/data/wikiBoard.java
@@ -54,6 +54,7 @@ import java.util.TimeZone;
 
 import de.anomic.kelondro.kelondroDyn;
 import de.anomic.kelondro.kelondroMap;
+import de.anomic.kelondro.kelondroRecords;
 import de.anomic.server.serverCodings;
 
 public class wikiBoard {
@@ -92,8 +93,14 @@ public class wikiBoard {
         return datbase.size() + bkpbase.size();
     }
 
-    public int dbCacheChunkSize() {
-        return (datbase.cacheChunkSize() + bkpbase.cacheChunkSize()) / 2;
+    public int[] dbCacheChunkSize() {
+        int[] db = datbase.cacheChunkSize();
+        int[] bk = bkpbase.cacheChunkSize();
+        int[] i = new int[3];
+        i[kelondroRecords.CP_LOW]    = (db[kelondroRecords.CP_LOW]    + bk[kelondroRecords.CP_LOW])    / 2;
+        i[kelondroRecords.CP_MEDIUM] = (db[kelondroRecords.CP_MEDIUM] + bk[kelondroRecords.CP_MEDIUM]) / 2;
+        i[kelondroRecords.CP_HIGH]   = (db[kelondroRecords.CP_HIGH]   + bk[kelondroRecords.CP_HIGH])   / 2;
+        return i;
     }
 
     public int[] dbCacheFillStatus() {
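The pattern above repeats for every database wrapper: cacheChunkSize() now reports one chunk size per cache priority instead of a single value. As a reading aid, here is a small, self-contained sketch (class and method names are invented, not part of the patch) of how the three values pair up with the per-priority slot counts, which is the same pairing putprop() in PerformanceMemory_p.java uses and the same high/med/low order the HTML template now displays.

```java
// Hypothetical illustration, not part of the patch: combining the per-priority
// chunk sizes returned by cacheChunkSize() with per-priority slot counts.
public class ChunkSizeReportSketch {

    // index convention taken from kelondroRecords
    static final int CP_LOW = 0, CP_MEDIUM = 1, CP_HIGH = 2;

    // memory bound in the cache: every priority level has its own chunk size
    static long usedBytes(int[] chunk, int lowSlots, int mediumSlots, int highSlots) {
        return (long) chunk[CP_LOW] * lowSlots
             + (long) chunk[CP_MEDIUM] * mediumSlots
             + (long) chunk[CP_HIGH] * highSlots;
    }

    // display form used on the PerformanceMemory page: "high/med/low"
    static String format(int[] chunk) {
        return chunk[CP_HIGH] + "/" + chunk[CP_MEDIUM] + "/" + chunk[CP_LOW];
    }

    public static void main(String[] args) {
        int[] chunk = {52, 148, 244};                     // example values only
        System.out.println(format(chunk));                // prints 244/148/52
        System.out.println(usedBytes(chunk, 30, 20, 10)); // 52*30 + 148*20 + 244*10
    }
}
```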
diff --git a/source/de/anomic/kelondro/kelondroMap.java b/source/de/anomic/kelondro/kelondroMap.java
index ec7f9e89f..854fa064d 100644
--- a/source/de/anomic/kelondro/kelondroMap.java
+++ b/source/de/anomic/kelondro/kelondroMap.java
@@ -133,7 +133,7 @@ public class kelondroMap {
         return dyn.columnSize(0);
     }
 
-    public int cacheChunkSize() {
+    public int[] cacheChunkSize() {
         return dyn.cacheChunkSize();
     }
 
diff --git a/source/de/anomic/kelondro/kelondroRecords.java b/source/de/anomic/kelondro/kelondroRecords.java
index 5f4cad9f5..8a40eb619 100644
--- a/source/de/anomic/kelondro/kelondroRecords.java
+++ b/source/de/anomic/kelondro/kelondroRecords.java
@@ -80,18 +80,17 @@ public class kelondroRecords {
 
     // constants
     private static final int NUL = Integer.MIN_VALUE; // the meta value for the kelondroRecords' NUL abstraction
-    public static final long memBlock = 500000;  // do not fill cache further if the amount of available memory is less that this
-    public static final long memKcolb = 10000000; // if the amount of available memory is greater than this, do not use cache size to block, simply use memory
+    private static final long memBlock = 500000; // do not fill cache further if the amount of available memory is less that this
 
     // memory calculation
     private static final int element_in_cache = 52;
     private static final int cache_control_entry = 96;
 
     // caching flags
-    protected static final int CP_NONE   = -1; // cache priority none; entry shall not be cached
-    protected static final int CP_LOW    =  0; // cache priority low; entry may be cached
-    protected static final int CP_MEDIUM =  1; // cache priority medium; entry shall be cached
-    protected static final int CP_HIGH   =  2; // cache priority high; entry must be cached
+    public static final int CP_NONE   = -1; // cache priority none; entry shall not be cached
+    public static final int CP_LOW    =  0; // cache priority low; entry may be cached
+    public static final int CP_MEDIUM =  1; // cache priority medium; entry shall be cached
+    public static final int CP_HIGH   =  2; // cache priority high; entry must be cached
 
     // static seek pointers
     private static long POS_MAGIC      = 0; // 1 byte, byte: file type magic
@@ -144,7 +143,7 @@
     private HashMap[] XcacheHeaders; // the cache; holds overhead values and key element
     private int XcacheSize;          // number of cache records
     private long XcacheStartup;      // startup time; for cache aging
-    private kelondroMScoreCluster XcacheScore; // controls cache aging
+    private kelondroMScoreCluster cacheScore;  // controls cache aging
 
     public kelondroRecords(File file,
                            long buffersize /* bytes */,
@@ -319,20 +318,28 @@
         if (buffersize <= 0) {
             this.XcacheSize = 0;
             this.XcacheHeaders = null;
-            this.XcacheScore = null;
+            this.cacheScore = null;
         } else {
             if ((buffersize / cacheChunkSize(false)) > size()) {
                 this.XcacheSize = (int) (buffersize / cacheChunkSize(false));
-                this.XcacheScore = null;
+                this.cacheScore = null; // no cache control because we have more cache slots than database entries
             } else {
                 this.XcacheSize = (int) (buffersize / cacheChunkSize(true));
-                this.XcacheScore = new kelondroMScoreCluster();
+                this.cacheScore = new kelondroMScoreCluster(); // cache control of CP_HIGH caches
             }
             this.XcacheHeaders = new HashMap[]{new HashMap(), new HashMap(), new HashMap()};
         }
         this.XcacheStartup = System.currentTimeMillis();
     }
+    private static final long max = Runtime.getRuntime().maxMemory();
+    private static final Runtime runtime = Runtime.getRuntime();
+
+    private static long availableMemory() {
+        // memory that is available including increasing total memory up to maximum
+        return max - runtime.totalMemory() + runtime.freeMemory();
+    }
+
     public File file() {
         if (filename == null) return null;
         return new File(filename);
@@ -342,8 +349,16 @@
         return this.headchunksize + element_in_cache + ((cacheControl) ? cache_control_entry : 0);
     }
 
-    public int cacheChunkSize() {
-        return cacheChunkSize(this.XcacheScore != null);
+    public int[] cacheChunkSize() {
+        // returns three integers:
+        // #0: chunk size of CP_LOW    - priority entries
+        // #1: chunk size of CP_MEDIUM - priority entries
+        // #2: chunk size of CP_HIGH   - priority entries
+        int[] i = new int[3];
+        i[CP_LOW]    = cacheChunkSize(false);
+        i[CP_MEDIUM] = cacheChunkSize(false);
+        i[CP_HIGH]   = cacheChunkSize(this.cacheScore != null);
+        return i;
     }
 
     public int[] cacheFillStatus() {
@@ -367,65 +382,100 @@
     protected void deleteNode(Handle handle) throws IOException {
         if (XcacheSize != 0) {
             synchronized (XcacheHeaders) {
-                if (XcacheScore == null) {
-                    XcacheHeaders[0].remove(handle);
-                    XcacheHeaders[1].remove(handle);
-                    XcacheHeaders[2].remove(handle);
-                } else if (XcacheHeaders[0].get(handle) != null) {
-                    XcacheScore.deleteScore(handle);
-                    XcacheHeaders[0].remove(handle);
-                } else if (XcacheHeaders[1].get(handle) != null) {
-                    XcacheScore.deleteScore(handle);
-                    XcacheHeaders[1].remove(handle);
-                } else if (XcacheHeaders[2].get(handle) != null) {
-                    XcacheScore.deleteScore(handle);
-                    XcacheHeaders[2].remove(handle);
+                if (cacheScore == null) {
+                    XcacheHeaders[CP_LOW].remove(handle);
+                    XcacheHeaders[CP_MEDIUM].remove(handle);
+                    XcacheHeaders[CP_HIGH].remove(handle);
+                } else if (XcacheHeaders[CP_HIGH].get(handle) != null) {
+                    // remove handle from cache-control
+                    cacheScore.deleteScore(handle);
+                    XcacheHeaders[CP_HIGH].remove(handle);
+                } else if (XcacheHeaders[CP_MEDIUM].get(handle) != null) {
+                    // no cache control for medium-priority entries
+                    XcacheHeaders[CP_MEDIUM].remove(handle);
+                } else if (XcacheHeaders[CP_LOW].get(handle) != null) {
+                    // no cache control for low-priority entries
+                    XcacheHeaders[CP_LOW].remove(handle);
                 }
             }
         }
         dispose(handle);
     }
 
-    private void checkCacheSpace(int forPriority) {
+    private boolean cacheSpace(int forPriority) {
         // check for space in cache
         // should be only called within a synchronized(XcacheHeaders) environment
-        if (XcacheSize == 0) return; // no caching
-        if (XcacheScore == null) return; // no cache control (== no deletion)
-        long free = Runtime.getRuntime().freeMemory();
-        long cs = XcacheHeaders[0].size() + XcacheHeaders[1].size() + XcacheHeaders[2].size();
-        if (cs == 0) return;
-        if ((cs < XcacheSize) && (free >= memBlock)) return; // no need to flush cache space
+        // returns true if it is allowed to add another entry to the cache
+        // returns false if the cache is considered to be full
+        if (forPriority == CP_NONE) return false;
+        if (XcacheSize == 0) return false; // no caching
+        long cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
+        if (cs == 0) return true; // nothing there to flush
+        if ((cs < XcacheSize) && (availableMemory() >= memBlock)) return true; // no need to flush cache space
         Handle delkey;
-        // (free < memKcolb)) ??
-        // delete one entry
-        try {
-            delkey = (Handle) XcacheScore.getMinObject(); // error (see below) here
-            int cp = priorityOfCache(delkey);
-            if (cp <= forPriority) {
-                // deletion is ok, it affects an entry that has less priority
-                XcacheScore.deleteScore(delkey);
-                XcacheHeaders[cp].remove(delkey);
+        // delete one entry. distinguish between different priority cases:
+        if (forPriority == CP_LOW) {
+            // remove only from low-priority cache
+            if (this.XcacheHeaders[CP_LOW].size() != 0) {
+                // just delete any of the low-priority entries
+                delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
+                XcacheHeaders[CP_LOW].remove(delkey);
+                return true;
             } else {
-                for (int i = 0; i < 3; i++) {
-                    if (XcacheHeaders[i].size() > 0) {
-                        delkey = (Handle) XcacheHeaders[i].keySet().iterator().next();
-                        XcacheScore.deleteScore(delkey);
-                        XcacheHeaders[i].remove(delkey);
-                        break;
-                    }
-                }
+                // we cannot delete any entry, therefore there is no space for another entry
+                return false;
+            }
+        } else if (forPriority == CP_MEDIUM) {
+            if (this.XcacheHeaders[CP_LOW].size() != 0) {
+                // just delete any of the low-priority entries
+                delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
+                XcacheHeaders[CP_LOW].remove(delkey);
+                return true;
+            } else if (this.XcacheHeaders[CP_MEDIUM].size() != 0) {
+                // just delete any of the medium-priority entries
+                delkey = (Handle) this.XcacheHeaders[CP_MEDIUM].keySet().iterator().next();
+                XcacheHeaders[CP_MEDIUM].remove(delkey);
+                return true;
+            } else {
+                // we cannot delete any entry, therefore there is no space for another entry
+                return false;
+            }
+        } else {
+            // request for a high-priority entry
+            if (this.XcacheHeaders[CP_LOW].size() != 0) {
+                // just delete any of the low-priority entries
+                delkey = (Handle) this.XcacheHeaders[CP_LOW].keySet().iterator().next();
+                XcacheHeaders[CP_LOW].remove(delkey);
+                return true;
+            } else if (this.XcacheHeaders[CP_MEDIUM].size() != 0) {
+                // just delete any of the medium-priority entries
+                delkey = (Handle) this.XcacheHeaders[CP_MEDIUM].keySet().iterator().next();
+                XcacheHeaders[CP_MEDIUM].remove(delkey);
+                return true;
+            } else if (cacheScore == null) {
+                // no cache-control of high-priority cache
+                // the cache is considered as full
+                return false;
+            } else try {
+                // delete one from the high-priority entries
+                // use the cache-control to find the right object
+                delkey = (Handle) cacheScore.getMinObject();
+                cacheScore.deleteScore(delkey);
+                XcacheHeaders[CP_HIGH].remove(delkey);
+                return true;
+            } catch (NoSuchElementException e) {
+                // this is a strange error and could be caused by internal java problems
+                // we simply clear the cache
+                String error = "cachScore error: " + e.getMessage() + "; cachesize=" + XcacheSize + ", cache.size()=[" + XcacheHeaders[0].size() + "," + XcacheHeaders[1].size() + "," + XcacheHeaders[2].size() + "], cacheScore.size()=" + cacheScore.size();
+                this.cacheScore = new kelondroMScoreCluster();
+                this.XcacheHeaders[0] = new HashMap();
+                this.XcacheHeaders[1] = new HashMap();
+                this.XcacheHeaders[2] = new HashMap();
+                throw new kelondroException(filename, error);
+            }
-        } catch (NoSuchElementException e) {
-            System.out.println("strange kelondroRecords error: " + e.getMessage() + "; cachesize=" + XcacheSize + ", cache.size()=[" + XcacheHeaders[0].size() + "," + XcacheHeaders[1].size() + "," + XcacheHeaders[2].size() + "], cacheScore.size()=" + XcacheScore.size());
-            // this is a strange error and could be caused by internal java problems
-            // we simply clear the cache
-            this.XcacheScore = new kelondroMScoreCluster();
-            this.XcacheHeaders[0] = new HashMap();
-            this.XcacheHeaders[1] = new HashMap();
-            this.XcacheHeaders[2] = new HashMap();
         }
-        delkey = null;
     }
 
     private int priorityOfCache(Handle handle) {
@@ -538,11 +588,19 @@
                 }
                 this.headChanged = false;
             } else synchronized(XcacheHeaders) {
-                byte[] cacheEntry = (byte[]) XcacheHeaders[CP_HIGH].get(this.handle); // first try
-                if (cacheEntry == null) cacheEntry = (byte[]) XcacheHeaders[CP_MEDIUM].get(this.handle); // second try
-                if (cacheEntry == null) cacheEntry = (byte[]) XcacheHeaders[CP_LOW].get(this.handle); // third try
+                byte[] cacheEntry = null;
+                int cp = CP_HIGH;
+                cacheEntry = (byte[]) XcacheHeaders[CP_HIGH].get(this.handle); // first try
+                if (cacheEntry == null) {
+                    cacheEntry = (byte[]) XcacheHeaders[CP_MEDIUM].get(this.handle); // second try
+                    cp = CP_MEDIUM;
+                }
+                if (cacheEntry == null) {
+                    cacheEntry = (byte[]) XcacheHeaders[CP_LOW].get(this.handle); // third try
+                    cp = CP_LOW;
+                }
                 if (cacheEntry == null) {
-                    // read overhead and key
+                    // cache miss, we read overhead and key from file
                     //System.out.println("**CACHE miss for " + this.handle.index + "**");
                     this.headChunk = new byte[headchunksize];
                     //this.tailChunk = new byte[tailchunksize];
@@ -552,22 +610,28 @@
                     //entryFile.read(this.tailChunk, 0, this.tailChunk.length);
                     }
                     this.headChanged = true; // provoke a cache store
-                    int cp = CP_HIGH;
+                    cp = CP_HIGH;
                     if (OHHANDLEC == 3) {
                         Handle l = getOHHandle(1);
                         Handle r = getOHHandle(2);
                         if ((l == null) && (r == null)) cp = CP_LOW;
                         else if ((l == null) || (r == null)) cp = CP_MEDIUM;
                     }
-                    checkCacheSpace(cp);
-                    updateNodeCache(cp);
+                    // if space left in cache, copy these value to the cache
+                    if (XcacheSize > 0) {
+                        XcacheHeaders[CP_LOW].remove(this.handle);
+                        XcacheHeaders[CP_MEDIUM].remove(this.handle);
+                        XcacheHeaders[CP_HIGH].remove(this.handle);
+                    } if (cacheSpace(cp)) updateNodeCache(cp);
                 } else {
+                    // cache hit, copy overhead and key from cache
                     //System.out.println("**CACHE HIT for " + this.handle.index + "**");
-                    // copy cache entry
                    this.headChunk = new byte[headchunksize];
                     System.arraycopy(cacheEntry, 0, this.headChunk, 0, headchunksize);
                     // update cache scores to announce this cache hit
-                    if (XcacheScore != null) XcacheScore.setScore(this.handle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
+                    if ((cacheScore != null) && (cp == CP_HIGH)) {
+                        cacheScore.setScore(this.handle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
+                    }
                     this.headChanged = false;
                 }
             }
@@ -688,7 +752,12 @@
                 //System.out.print("#write "); printChunk(this.handle, this.headChunk); System.out.println();
                 entryFile.write(this.headChunk);
             }
-            updateNodeCache(cachePriority);
+            if (XcacheSize > 0) {
+                XcacheHeaders[CP_LOW].remove(this.handle);
+                XcacheHeaders[CP_MEDIUM].remove(this.handle);
+                XcacheHeaders[CP_HIGH].remove(this.handle);
+            }
+            if (cacheSpace(cachePriority)) updateNodeCache(cachePriority);
         }
 
         // save tail
@@ -739,13 +808,9 @@
         if (priority == CP_NONE) return; // it is not wanted that this shall be cached
         if (XcacheSize == 0) return; // we do not use the cache
         int cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
-        if ((cs >= XcacheSize) && (XcacheScore == null)) return; // no cache update if cache is full and not controlled
-        if ((cs >= XcacheSize) && (priority == CP_LOW)) return; // no cache update if cache is full and new entry has low priority
+        if (cs >= XcacheSize) return; // no cache update if cache is full
         synchronized (XcacheHeaders) {
-            // remember size to evaluate a cache size check need
-            int sizeBefore = cs;
-            //long memBefore = Runtime.getRuntime().freeMemory();
             // generate cache entry
             byte[] cacheEntry = new byte[headchunksize];
             System.arraycopy(headChunk, 0, cacheEntry, 0, headchunksize);
@@ -757,16 +822,14 @@
             if (priority != CP_MEDIUM) XcacheHeaders[CP_MEDIUM].remove(cacheHandle);
             if (priority != CP_HIGH) XcacheHeaders[CP_HIGH].remove(cacheHandle);
             XcacheHeaders[priority].put(cacheHandle, cacheEntry);
-            if (XcacheScore != null) XcacheScore.setScore(cacheHandle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
+            if ((cacheScore != null) && (priority == CP_HIGH)) {
+                cacheScore.setScore(cacheHandle, (int) ((System.currentTimeMillis() - XcacheStartup) / 1000));
+            }
 
             // delete the cache entry buffer
             cacheEntry = null;
             cacheHandle = null;
             //System.out.println("kelondroRecords cache4" + filename + ": cache record size = " + (memBefore - Runtime.getRuntime().freeMemory()) + " bytes" + ((newentry) ? " new" : ""));
-            // check cache size
-            cs = XcacheHeaders[CP_LOW].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_HIGH].size();
-            if (cs > sizeBefore) checkCacheSpace(priority);
-            //System.out.println("kelondroRecords cache4" + filename + ": " + XcacheHeaders.size() + " entries, " + XcacheSize + " allowed.");
             //printCache();
         }
     }
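The core of the change is cacheSpace() above: instead of asking the score cluster for a victim regardless of priority, it frees a slot only from the same or a lower priority level, and only high-priority requests ever touch the score-controlled part of the cache. Below is a simplified, standalone model of that eviction order (illustration only; the real method works on Handle keys, also checks availableMemory() against memBlock, and reports the cache as full for high-priority requests when no score cluster exists).

```java
// Simplified, standalone model of the eviction order in cacheSpace() (illustration
// only; the real method operates on Handle keys and a kelondroMScoreCluster).
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

public class PriorityEvictionSketch {

    static final int CP_LOW = 0, CP_MEDIUM = 1, CP_HIGH = 2;

    private final List<Deque<String>> caches = Arrays.asList(
            new ArrayDeque<String>(), new ArrayDeque<String>(), new ArrayDeque<String>());
    private final int capacity;

    PriorityEvictionSketch(int capacity) {
        this.capacity = capacity;
    }

    // true if an entry of the given priority may be stored afterwards
    boolean makeSpace(int forPriority) {
        int filled = caches.get(CP_LOW).size() + caches.get(CP_MEDIUM).size() + caches.get(CP_HIGH).size();
        if (filled < capacity) return true;      // still room, nothing to evict
        // evict from the same or a lower priority level only, never from a higher one
        for (int level = CP_LOW; level <= forPriority; level++) {
            if (!caches.get(level).isEmpty()) {
                caches.get(level).poll();        // the real code picks the score minimum for CP_HIGH
                return true;
            }
        }
        return false;                            // only higher-priority entries left: report "full"
    }
}
```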
diff --git a/source/de/anomic/kelondro/kelondroStack.java b/source/de/anomic/kelondro/kelondroStack.java
index ed48700b8..67cd98bc7 100644
--- a/source/de/anomic/kelondro/kelondroStack.java
+++ b/source/de/anomic/kelondro/kelondroStack.java
@@ -257,8 +257,8 @@
         Handle h = getHandle(side);
         if (h == null) return null;
         if (dist >= size()) return null; // that would exceed the stack
-        while (dist-- > 0) h = getNode(h, null, 0).getOHHandle(dir); // track through elements
-        return getNode(h, null, 0);
+        while (dist-- > 0) h = getNode(h).getOHHandle(dir); // track through elements
+        return getNode(h);
     }
 
diff --git a/source/de/anomic/kelondro/kelondroTree.java b/source/de/anomic/kelondro/kelondroTree.java
index 244200bb5..9572d03e5 100644
--- a/source/de/anomic/kelondro/kelondroTree.java
+++ b/source/de/anomic/kelondro/kelondroTree.java
@@ -1370,7 +1370,7 @@ public class kelondroTree extends kelondroRecords implements Comparator {
             int steps = 0;
             while (true) {
                 if (testFile.exists()) testFile.delete();
-                tt = new kelondroTree(testFile, 2000, 4 ,4);
+                tt = new kelondroTree(testFile, 200, 4 ,4);
                 steps = 10 + ((int) System.currentTimeMillis() % 7) * (((int) System.currentTimeMillis() + 17) % 11);
                 t = s;
                 d = "";
diff --git a/source/de/anomic/plasma/plasmaHTCache.java b/source/de/anomic/plasma/plasmaHTCache.java
index 100a5bd2a..31d536433 100644
--- a/source/de/anomic/plasma/plasmaHTCache.java
+++ b/source/de/anomic/plasma/plasmaHTCache.java
@@ -139,7 +139,7 @@ public final class plasmaHTCache {
         return responseHeaderDB.size();
     }
 
-    public int dbCacheChunkSize() {
+    public int[] dbCacheChunkSize() {
         return responseHeaderDB.cacheChunkSize();
     }
 
diff --git a/source/de/anomic/plasma/plasmaWordIndex.java b/source/de/anomic/plasma/plasmaWordIndex.java
index 40a0f0da1..7935a551e 100644
--- a/source/de/anomic/plasma/plasmaWordIndex.java
+++ b/source/de/anomic/plasma/plasmaWordIndex.java
@@ -85,7 +85,7 @@ public final class plasmaWordIndex {
         return ramCache.assortmentsSizes();
     }
 
-    public int assortmentsCacheChunkSizeAvg() {
+    public int[] assortmentsCacheChunkSizeAvg() {
         return ramCache.assortmentsCacheChunkSizeAvg();
     }
 
diff --git a/source/de/anomic/plasma/plasmaWordIndexAssortment.java b/source/de/anomic/plasma/plasmaWordIndexAssortment.java
index 3380530fe..32c809b63 100644
--- a/source/de/anomic/plasma/plasmaWordIndexAssortment.java
+++ b/source/de/anomic/plasma/plasmaWordIndexAssortment.java
@@ -148,10 +148,10 @@
         try {
             oldrow = assortments.put(row);
         } catch (IOException e) {
-            log.logSevere("storeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("storeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
         } catch (kelondroException e) {
-            log.logSevere("storeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("storeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
         }
         if (oldrow != null) throw new RuntimeException("Store to assortment ambiguous");
@@ -164,11 +164,11 @@
         try {
             row = assortments.remove(wordHash.getBytes());
         } catch (IOException e) {
-            log.logSevere("removeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("removeAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
             return null;
         } catch (kelondroException e) {
-            log.logSevere("removeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("removeAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
             return null;
         }
@@ -199,11 +199,11 @@
         try {
             return assortments.keys(up, rot, startWordHash.getBytes());
         } catch (IOException e) {
-            log.logSevere("iterateAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("iterateAssortment/IO-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
             return null;
         } catch (kelondroException e) {
-            log.logSevere("iterateAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB", e);
+            log.logSevere("iterateAssortment/kelondro-error: " + e.getMessage() + " - reset assortment-DB " + assortments.file(), e);
             resetDatabase();
             return null;
         }
@@ -213,7 +213,7 @@
         return assortments.size();
     }
 
-    public int cacheChunkSize() {
+    public int[] cacheChunkSize() {
         return assortments.cacheChunkSize();
     }
 
diff --git a/source/de/anomic/plasma/plasmaWordIndexAssortmentCluster.java b/source/de/anomic/plasma/plasmaWordIndexAssortmentCluster.java
index ec969f4a8..3265261f0 100644
--- a/source/de/anomic/plasma/plasmaWordIndexAssortmentCluster.java
+++ b/source/de/anomic/plasma/plasmaWordIndexAssortmentCluster.java
@@ -50,6 +50,7 @@ import java.io.File;
 import java.util.HashSet;
 import java.util.Iterator;
 
+import de.anomic.kelondro.kelondroRecords;
 import de.anomic.kelondro.kelondroMergeIterator;
 import de.anomic.server.logging.serverLog;
 
@@ -191,10 +192,19 @@
         return sizes;
     }
 
-    public int cacheChunkSizeAvg() {
-        int total = 0;
-        for (int i = 0; i < clusterCount; i++) total += assortments[i].cacheChunkSize();
-        return total / clusterCount;
+    public int[] cacheChunkSizeAvg() {
+        int[] i = new int[]{0, 0, 0};
+        int[] a = new int[3];
+        for (int j = 0; j < clusterCount; j++) {
+            a = assortments[j].cacheChunkSize();
+            i[kelondroRecords.CP_LOW]    += a[kelondroRecords.CP_LOW];
+            i[kelondroRecords.CP_MEDIUM] += a[kelondroRecords.CP_MEDIUM];
+            i[kelondroRecords.CP_HIGH]   += a[kelondroRecords.CP_HIGH];
+        }
+        a[kelondroRecords.CP_LOW]    = i[kelondroRecords.CP_LOW]    / clusterCount;
+        a[kelondroRecords.CP_MEDIUM] = i[kelondroRecords.CP_MEDIUM] / clusterCount;
+        a[kelondroRecords.CP_HIGH]   = i[kelondroRecords.CP_HIGH]   / clusterCount;
+        return a;
     }
 
     public int[] cacheFillStatusCml() {
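wikiBoard above, plasmaWordIndexAssortmentCluster here, and yacySeedDB further below all average the per-priority triples index by index. A hypothetical helper that captures the repeated arithmetic is sketched here (not part of the patch); each index is averaged independently, so the CP_LOW, CP_MEDIUM and CP_HIGH positions never mix.

```java
// Hypothetical helper, not in the patch: the index-wise averaging that wikiBoard,
// plasmaWordIndexAssortmentCluster and yacySeedDB currently spell out by hand.
public class ChunkSizeMath {

    static final int CP_LOW = 0, CP_MEDIUM = 1, CP_HIGH = 2;

    // averages several {low, medium, high} chunk-size triples, index by index
    static int[] average(int[]... chunkSizes) {
        int[] sum = new int[3];
        for (int[] c : chunkSizes) {
            sum[CP_LOW]    += c[CP_LOW];
            sum[CP_MEDIUM] += c[CP_MEDIUM];
            sum[CP_HIGH]   += c[CP_HIGH];
        }
        sum[CP_LOW]    /= chunkSizes.length;
        sum[CP_MEDIUM] /= chunkSizes.length;
        sum[CP_HIGH]   /= chunkSizes.length;
        return sum;
    }

    public static void main(String[] args) {
        int[] avg = average(new int[]{40, 120, 200}, new int[]{60, 140, 220});
        System.out.println(avg[CP_HIGH] + "/" + avg[CP_MEDIUM] + "/" + avg[CP_LOW]); // 210/130/50
    }
}
```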
diff --git a/source/de/anomic/plasma/plasmaWordIndexCache.java b/source/de/anomic/plasma/plasmaWordIndexCache.java
index 6bf2de41c..354de0542 100644
--- a/source/de/anomic/plasma/plasmaWordIndexCache.java
+++ b/source/de/anomic/plasma/plasmaWordIndexCache.java
@@ -265,7 +265,7 @@ public final class plasmaWordIndexCache implements plasmaWordIndexInterface {
         return assortmentCluster.sizes();
     }
 
-    public int assortmentsCacheChunkSizeAvg() {
+    public int[] assortmentsCacheChunkSizeAvg() {
         return assortmentCluster.cacheChunkSizeAvg();
     }
 
diff --git a/source/de/anomic/server/serverMemory.java b/source/de/anomic/server/serverMemory.java
new file mode 100644
index 000000000..a7af9cc93
--- /dev/null
+++ b/source/de/anomic/server/serverMemory.java
@@ -0,0 +1,68 @@
+// serverMemory.java
+// -------------------------------------------
+// (C) by Michael Peter Christen; mc@anomic.de
+// first published on http://www.anomic.de
+// Frankfurt, Germany, 2005
+// Created 22.09.2005
+//
+// $LastChangedDate: 2005-09-21 16:21:45 +0200 (Wed, 21 Sep 2005) $
+// $LastChangedRevision: 763 $
+// $LastChangedBy: orbiter $
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; either version 2 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+//
+// Using this software in any meaning (reading, learning, copying, compiling,
+// running) means that you agree that the Author(s) is (are) not responsible
+// for cost, loss of data or any harm that may be caused directly or indirectly
+// by usage of this softare or this documentation. The usage of this software
+// is on your own risk. The installation and usage (starting/running) of this
+// software may allow other people or application to access your computer and
+// any attached devices and is highly dependent on the configuration of the
+// software which must be done by the user of the software; the author(s) is
+// (are) also not responsible for proper configuration and usage of the
+// software, even if provoked by documentation provided together with
+// the software.
+//
+// Any changes to this file according to the GPL as documented in the file
+// gpl.txt aside this file in the shipment you received can be done to the
+// lines that follows this copyright notice here, but changes must not be
+// done inside the copyright notive above. A re-distribution must contain
+// the intact and unchanged copyright notice.
+// Contributions and changes to the program code must be marked as such.
+
+
+package de.anomic.server;
+
+public class serverMemory {
+
+    public static final long max = Runtime.getRuntime().maxMemory();
+    private static final Runtime runtime = Runtime.getRuntime();
+
+    public static long free() {
+        // memory that is free without increasing of total memory takenn from os
+        return runtime.freeMemory();
+    }
+
+    public static long available() {
+        // memory that is available including increasing total memory up to maximum
+        return max - runtime.totalMemory() + runtime.freeMemory();
+    }
+
+    public static long used() {
+        // memory that is currently bound in objects
+        return runtime.totalMemory() - runtime.freeMemory();
+    }
+
+}
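serverMemory reports available memory as max - total + free, that is, it also counts heap the JVM could still claim up to its configured maximum, which is the same quantity the private availableMemory() in kelondroRecords uses to decide whether the cache may keep growing. A small usage sketch follows; the calling class and the threshold value are invented for illustration.

```java
// Usage sketch for the new serverMemory helper; the calling class and the
// threshold value are invented for illustration.
import de.anomic.server.serverMemory;

public class MemoryGateSketch {

    private static final long MIN_AVAILABLE = 500000; // same order of magnitude as kelondroRecords.memBlock

    // gate an optional, memory-hungry step on what the JVM could still claim, not on freeMemory() alone
    public static boolean mayGrowCache() {
        return serverMemory.available() >= MIN_AVAILABLE;
    }

    public static void main(String[] args) {
        System.out.println("used      = " + serverMemory.used());
        System.out.println("free      = " + serverMemory.free());
        System.out.println("available = " + serverMemory.available());
        System.out.println("may grow cache: " + mayGrowCache());
    }
}
```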
diff --git a/source/de/anomic/yacy/yacyNewsDB.java b/source/de/anomic/yacy/yacyNewsDB.java
index adfc9a237..fa748c917 100644
--- a/source/de/anomic/yacy/yacyNewsDB.java
+++ b/source/de/anomic/yacy/yacyNewsDB.java
@@ -89,7 +89,7 @@ public class yacyNewsDB {
         news = createDB(path, bufferkb);
     }
 
-    public int dbCacheChunkSize() {
+    public int[] dbCacheChunkSize() {
         return news.cacheChunkSize();
     }
 
diff --git a/source/de/anomic/yacy/yacyNewsPool.java b/source/de/anomic/yacy/yacyNewsPool.java
index 1e2c060d0..dbe90337b 100644
--- a/source/de/anomic/yacy/yacyNewsPool.java
+++ b/source/de/anomic/yacy/yacyNewsPool.java
@@ -103,7 +103,7 @@ public class yacyNewsPool {
         return newsDB.size();
     }
 
-    public int dbCacheChunkSize() {
+    public int[] dbCacheChunkSize() {
         return newsDB.dbCacheChunkSize();
     }
 
diff --git a/source/de/anomic/yacy/yacySeedDB.java b/source/de/anomic/yacy/yacySeedDB.java
index 3cd67166f..a7f5c2095 100644
--- a/source/de/anomic/yacy/yacySeedDB.java
+++ b/source/de/anomic/yacy/yacySeedDB.java
@@ -58,6 +58,7 @@ import de.anomic.kelondro.kelondroDyn;
 import de.anomic.kelondro.kelondroException;
 import de.anomic.kelondro.kelondroMScoreCluster;
 import de.anomic.kelondro.kelondroMap;
+import de.anomic.kelondro.kelondroRecords;
 import de.anomic.plasma.plasmaSwitchboard;
 import de.anomic.server.serverCore;
 import de.anomic.server.serverSwitch;
@@ -148,8 +149,15 @@ public final class yacySeedDB {
         } catch (IOException e) {}
     }
 
-    public int dbCacheChunkSize() {
-        return (seedActiveDB.cacheChunkSize() + seedPassiveDB.cacheChunkSize() + seedPotentialDB.cacheChunkSize()) / 3;
+    public int[] dbCacheChunkSize() {
+        int[] ac = seedActiveDB.cacheChunkSize();
+        int[] pa = seedPassiveDB.cacheChunkSize();
+        int[] po = seedPotentialDB.cacheChunkSize();
+        int[] i = new int[3];
+        i[kelondroRecords.CP_LOW]    = (ac[kelondroRecords.CP_LOW]    + pa[kelondroRecords.CP_LOW]    + po[kelondroRecords.CP_LOW])    / 3;
+        i[kelondroRecords.CP_MEDIUM] = (ac[kelondroRecords.CP_MEDIUM] + pa[kelondroRecords.CP_MEDIUM] + po[kelondroRecords.CP_MEDIUM]) / 3;
+        i[kelondroRecords.CP_HIGH]   = (ac[kelondroRecords.CP_HIGH]   + pa[kelondroRecords.CP_HIGH]   + po[kelondroRecords.CP_HIGH])   / 3;
+        return i;
     }
 
     public int[] dbCacheFillStatus() {