added monitoring of the new object cache to the PerformanceMemory page

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@2072 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter
parent 23ced30e83
commit 29b1b0823c
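In outline, the change threads a new status array (ost) from kelondroObjectCache through every database wrapper into the PerformanceMemory_p servlet and its template. A minimal sketch of that flow for one table row, using names from the diff below and simplifying the surrounding servlet plumbing:

// hedged sketch, condensed from the PerformanceMemory_p.java hunks in this diff
String[] ost = sb.messageDB.dbCacheObjectStatus();   // delegates to kelondroObjectCache.status()
prop.put("ochunkmaxMessage", ost[0]);                // configured cache capacity
prop.put("ochunkcurMessage", ost[1]);                // objects currently cached
prop.put("ohittmissMessage", ost[5] + ":" + ost[6]); // read hits : read misses
prop.put("ouniqdoubMessage", ost[7] + ":" + ost[8]); // unique writes : double writes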

@@ -80,8 +80,12 @@
<form action="PerformanceMemory_p.html" method="post" enctype="multipart/form-data">
<tr class="TableHeader" valign="bottom">
<td class="small" rowspan="2">Database<br>&nbsp;</td>
<td class="small" rowspan="2">Chunk Size<br>high/med/low<br>(bytes)</td>
<td class="small" rowspan="2">Node<br>Chunk Size<br>high/med/low<br>(bytes)</td>
<td class="small" colspan="5">#Slots</td>
<td class="small" rowspan="2">Object<br>Chunk Size<br>Max</td>
<td class="small" rowspan="2">Object<br>Chunk Size<br>Current</td>
<td class="small" rowspan="2">Hit/Miss<br>Ratio<br></td>
<td class="small" rowspan="2">Unique/Double<br>Ratio<br></td>
<td class="small" colspan="5">Memory Occupation (KBytes)</td>
<td class="small" rowspan="2">Description<br>&nbsp;</td>
</tr>
@@ -107,6 +111,10 @@
<td class="small" align="right">#[slhigRWI]#</td>
<td class="small" align="right">#[slmedRWI]#</td>
<td class="small" align="right">#[sllowRWI]#</td>
<td class="small" align="right">#[ochunkmaxRWI]#</td>
<td class="small" align="right">#[ochunkcurRWI]#</td>
<td class="small" align="right">#[ohittmissRWI]#</td>
<td class="small" align="right">#[ouniqdoubRWI]#</td>
<td class="small" align="right">#[usedRWI]#</td>
<td class="small" align="right"><input name="ramCacheRWI" type="text" align="right" size="6" maxlength="7" value="#[ramCacheRWI]#"></td>
<td class="small" align="right">#[dfltRWI]#</td>
@@ -125,6 +133,10 @@ cache will speed up crawls with a depth > 3.</td>
<td class="small" align="right">#[slhigHTTP]#</td>
<td class="small" align="right">#[slmedHTTP]#</td>
<td class="small" align="right">#[sllowHTTP]#</td>
<td class="small" align="right">#[ochunkmaxHTTP]#</td>
<td class="small" align="right">#[ochunkcurHTTP]#</td>
<td class="small" align="right">#[ohittmissHTTP]#</td>
<td class="small" align="right">#[ouniqdoubHTTP]#</td>
<td class="small" align="right">#[usedHTTP]#</td>
<td class="small" align="right"><input name="ramCacheHTTP" type="text" align="right" size="6" maxlength="7" value="#[ramCacheHTTP]#"></td>
<td class="small" align="right">#[dfltHTTP]#</td>
@@ -143,6 +155,10 @@ Increasing this cache will be most important for a fast proxy mode.</td>
<td class="small" align="right">#[slhigLURL]#</td>
<td class="small" align="right">#[slmedLURL]#</td>
<td class="small" align="right">#[sllowLURL]#</td>
<td class="small" align="right">#[ochunkmaxLURL]#</td>
<td class="small" align="right">#[ochunkcurLURL]#</td>
<td class="small" align="right">#[ohittmissLURL]#</td>
<td class="small" align="right">#[ouniqdoubLURL]#</td>
<td class="small" align="right">#[usedLURL]#</td>
<td class="small" align="right"><input name="ramCacheLURL" type="text" align="right" size="6" maxlength="7" value="#[ramCacheLURL]#"></td>
<td class="small" align="right">#[dfltLURL]#</td>
@@ -160,6 +176,10 @@ This cache is very important for a fast search process. Increasing the cache siz
<td class="small" align="right">#[slhigNURL]#</td>
<td class="small" align="right">#[slmedNURL]#</td>
<td class="small" align="right">#[sllowNURL]#</td>
<td class="small" align="right">#[ochunkmaxNURL]#</td>
<td class="small" align="right">#[ochunkcurNURL]#</td>
<td class="small" align="right">#[ohittmissNURL]#</td>
<td class="small" align="right">#[ouniqdoubNURL]#</td>
<td class="small" align="right">#[usedNURL]#</td>
<td class="small" align="right"><input name="ramCacheNURL" type="text" align="right" size="6" maxlength="7" value="#[ramCacheNURL]#"></td>
<td class="small" align="right">#[dfltNURL]#</td>
@@ -177,6 +197,10 @@ Increasing the cache size will result in faster double-check during URL recognit
<td class="small" align="right">#[slhigEURL]#</td>
<td class="small" align="right">#[slmedEURL]#</td>
<td class="small" align="right">#[sllowEURL]#</td>
<td class="small" align="right">#[ochunkmaxEURL]#</td>
<td class="small" align="right">#[ochunkcurEURL]#</td>
<td class="small" align="right">#[ohittmissEURL]#</td>
<td class="small" align="right">#[ouniqdoubEURL]#</td>
<td class="small" align="right">#[usedEURL]#</td>
<td class="small" align="right"><input name="ramCacheEURL" type="text" align="right" size="6" maxlength="7" value="#[ramCacheEURL]#"></td>
<td class="small" align="right">#[dfltEURL]#</td>
@@ -194,6 +218,10 @@ Increasing the cache size will most probably speed up crawling slightly, but not
<td class="small" align="right">#[slhigDHT]#</td>
<td class="small" align="right">#[slmedDHT]#</td>
<td class="small" align="right">#[sllowDHT]#</td>
<td class="small" align="right">#[ochunkmaxDHT]#</td>
<td class="small" align="right">#[ochunkcurDHT]#</td>
<td class="small" align="right">#[ohittmissDHT]#</td>
<td class="small" align="right">#[ouniqdoubDHT]#</td>
<td class="small" align="right">#[usedDHT]#</td>
<td class="small" align="right"><input name="ramCacheDHT" type="text" align="right" size="6" maxlength="7" value="#[ramCacheDHT]#"></td>
<td class="small" align="right">#[dfltDHT]#</td>
@@ -212,6 +240,10 @@ Increasing this cache may speed up many functions, but we need to test this to s
<td class="small" align="right">#[slhigMessage]#</td>
<td class="small" align="right">#[slmedMessage]#</td>
<td class="small" align="right">#[sllowMessage]#</td>
<td class="small" align="right">#[ochunkmaxMessage]#</td>
<td class="small" align="right">#[ochunkcurMessage]#</td>
<td class="small" align="right">#[ohittmissMessage]#</td>
<td class="small" align="right">#[ouniqdoubMessage]#</td>
<td class="small" align="right">#[usedMessage]#</td>
<td class="small" align="right"><input name="ramCacheMessage" type="text" align="right" size="6" maxlength="7" value="#[ramCacheMessage]#"></td>
<td class="small" align="right">#[dfltMessage]#</td>
@@ -228,6 +260,10 @@ Increasing this cache may speed up many functions, but we need to test this to s
<td class="small" align="right">#[slhigWiki]#</td>
<td class="small" align="right">#[slmedWiki]#</td>
<td class="small" align="right">#[sllowWiki]#</td>
<td class="small" align="right">#[ochunkmaxWiki]#</td>
<td class="small" align="right">#[ochunkcurWiki]#</td>
<td class="small" align="right">#[ohittmissWiki]#</td>
<td class="small" align="right">#[ouniqdoubWiki]#</td>
<td class="small" align="right">#[usedWiki]#</td>
<td class="small" align="right"><input name="ramCacheWiki" type="text" align="right" size="6" maxlength="7" value="#[ramCacheWiki]#"></td>
<td class="small" align="right">#[dfltWiki]#</td>
@@ -246,6 +282,10 @@ Increasing this cache may speed up access to the wiki pages.</td>
<td class="small" align="right">#[slhigBlog]#</td>
<td class="small" align="right">#[slmedBlog]#</td>
<td class="small" align="right">#[sllowBlog]#</td>
<td class="small" align="right">#[ochunkmaxBlog]#</td>
<td class="small" align="right">#[ochunkcurBlog]#</td>
<td class="small" align="right">#[ohittmissBlog]#</td>
<td class="small" align="right">#[ouniqdoubBlog]#</td>
<td class="small" align="right">#[usedBlog]#</td>
<td class="small" align="right"><input name="ramCacheBlog" type="text" align="right" size="6" maxlength="7" value="#[ramCacheBlog]#"></td>
<td class="small" align="right">#[dfltBlog]#</td>
@@ -263,6 +303,10 @@ Increasing this cache may speed up access to the Blog.</td>
<td class="small" align="right">#[slhigNews]#</td>
<td class="small" align="right">#[slmedNews]#</td>
<td class="small" align="right">#[sllowNews]#</td>
<td class="small" align="right">#[ochunkmaxNews]#</td>
<td class="small" align="right">#[ochunkcurNews]#</td>
<td class="small" align="right">#[ohittmissNews]#</td>
<td class="small" align="right">#[ouniqdoubNews]#</td>
<td class="small" align="right">#[usedNews]#</td>
<td class="small" align="right"><input name="ramCacheNews" type="text" align="right" size="6" maxlength="7" value="#[ramCacheNews]#"></td>
<td class="small" align="right">#[dfltNews]#</td>
@@ -280,6 +324,10 @@ Increasing this cache may speed up the peer-ping.</td>
<td class="small" align="right">#[slhigRobots]#</td>
<td class="small" align="right">#[slmedRobots]#</td>
<td class="small" align="right">#[sllowRobots]#</td>
<td class="small" align="right">#[ochunkmaxRobots]#</td>
<td class="small" align="right">#[ochunkcurRobots]#</td>
<td class="small" align="right">#[ohittmissRobots]#</td>
<td class="small" align="right">#[ouniqdoubRobots]#</td>
<td class="small" align="right">#[usedRobots]#</td>
<td class="small" align="right"><input name="ramCacheRobots" type="text" align="right" size="6" maxlength="7" value="#[ramCacheRobots]#"></td>
<td class="small" align="right">#[dfltRobots]#</td>
@@ -297,6 +345,10 @@ Increasing this cache may speed up validation if crawling of the URL is allowed.
<td class="small" align="right">#[slhigProfiles]#</td>
<td class="small" align="right">#[slmedProfiles]#</td>
<td class="small" align="right">#[sllowProfiles]#</td>
<td class="small" align="right">#[ochunkmaxProfiles]#</td>
<td class="small" align="right">#[ochunkcurProfiles]#</td>
<td class="small" align="right">#[ohittmissProfiles]#</td>
<td class="small" align="right">#[ouniqdoubProfiles]#</td>
<td class="small" align="right">#[usedProfiles]#</td>
<td class="small" align="right"><input name="ramCacheProfiles" type="text" align="right" size="6" maxlength="7" value="#[ramCacheProfiles]#"></td>
<td class="small" align="right">#[dfltProfiles]#</td>

@@ -63,8 +63,9 @@ public class PerformanceMemory_p {
private static final int MB = 1024 * KB;
private static Map defaultSettings = null;
private static int[] slt,chk;
private static int req,usd,bst,god;
private static int[] slt,chk;
private static String[] ost;
private static int req, usd, bst, god;
private static long usedTotal, currTotal, dfltTotal, goodTotal, bestTotal;
@@ -157,61 +158,73 @@ public class PerformanceMemory_p {
req = sb.wordIndex.size();
chk = sb.wordIndex.assortmentsCacheChunkSizeAvg();
slt = sb.wordIndex.assortmentsCacheFillStatusCml();
ost = sb.wordIndex.assortmentsCacheObjectStatus();
putprop(prop, env, "RWI", set);
req = sb.cacheManager.dbSize();
chk = sb.cacheManager.dbCacheChunkSize();
slt = sb.cacheManager.dbCacheFillStatus();
ost = sb.cacheManager.dbCacheObjectStatus();
putprop(prop, env, "HTTP", set);
req = sb.urlPool.loadedURL.urlHashCache.size();
chk = sb.urlPool.loadedURL.urlHashCache.cacheChunkSize();
slt = sb.urlPool.loadedURL.urlHashCache.cacheFillStatus();
chk = sb.urlPool.loadedURL.urlHashCache.cacheNodeChunkSize();
slt = sb.urlPool.loadedURL.urlHashCache.cacheNodeFillStatus();
ost = sb.urlPool.loadedURL.urlHashCache.cacheObjectStatus();
putprop(prop, env, "LURL", set);
req = sb.urlPool.noticeURL.urlHashCache.size();
chk = sb.urlPool.noticeURL.urlHashCache.cacheChunkSize();
slt = sb.urlPool.noticeURL.urlHashCache.cacheFillStatus();
chk = sb.urlPool.noticeURL.urlHashCache.cacheNodeChunkSize();
slt = sb.urlPool.noticeURL.urlHashCache.cacheNodeFillStatus();
ost = sb.urlPool.noticeURL.urlHashCache.cacheObjectStatus();
putprop(prop, env, "NURL", set);
req = sb.urlPool.errorURL.urlHashCache.size();
chk = sb.urlPool.errorURL.urlHashCache.cacheChunkSize();
slt = sb.urlPool.errorURL.urlHashCache.cacheFillStatus();
chk = sb.urlPool.errorURL.urlHashCache.cacheNodeChunkSize();
slt = sb.urlPool.errorURL.urlHashCache.cacheNodeFillStatus();
ost = sb.urlPool.errorURL.urlHashCache.cacheObjectStatus();
putprop(prop, env, "EURL", set);
req = yacyCore.seedDB.sizeConnected() + yacyCore.seedDB.sizeDisconnected() + yacyCore.seedDB.sizePotential();
chk = yacyCore.seedDB.dbCacheChunkSize();
slt = yacyCore.seedDB.dbCacheFillStatus();
chk = yacyCore.seedDB.dbCacheNodeChunkSize();
slt = yacyCore.seedDB.dbCacheNodeFillStatus();
ost = yacyCore.seedDB.dbCacheObjectStatus();
putprop(prop, env, "DHT", set);
req = sb.messageDB.size();
chk = sb.messageDB.dbCacheChunkSize();
slt = sb.messageDB.dbCacheFillStatus();
chk = sb.messageDB.dbCacheNodeChunkSize();
slt = sb.messageDB.dbCacheNodeFillStatus();
ost = sb.messageDB.dbCacheObjectStatus();
putprop(prop, env, "Message", set);
req = sb.wikiDB.sizeOfTwo();
chk = sb.wikiDB.dbCacheChunkSize();
slt = sb.wikiDB.dbCacheFillStatus();
chk = sb.wikiDB.dbCacheNodeChunkSize();
slt = sb.wikiDB.dbCacheNodeFillStatus();
ost = sb.wikiDB.dbCacheObjectStatus();
putprop(prop, env, "Wiki", set);
req = sb.blogDB.size();
chk = sb.blogDB.dbCacheChunkSize();
slt = sb.blogDB.dbCacheFillStatus();
chk = sb.blogDB.dbCacheNodeChunkSize();
slt = sb.blogDB.dbCacheNodeFillStatus();
ost = sb.blogDB.dbCacheObjectStatus();
putprop(prop, env, "Blog", set);
req = yacyCore.newsPool.dbSize();
chk = yacyCore.newsPool.dbCacheChunkSize();
slt = yacyCore.newsPool.dbCacheFillStatus();
chk = yacyCore.newsPool.dbCacheNodeChunkSize();
slt = yacyCore.newsPool.dbCacheNodeFillStatus();
ost = yacyCore.newsPool.dbCacheObjectStatus();
putprop(prop, env, "News", set);
req = plasmaSwitchboard.robots.size();
chk = plasmaSwitchboard.robots.dbCacheChunkSize();
slt = plasmaSwitchboard.robots.dbCacheFillStatus();
chk = plasmaSwitchboard.robots.dbCacheNodeChunkSize();
slt = plasmaSwitchboard.robots.dbCacheNodeFillStatus();
ost = plasmaSwitchboard.robots.dbCacheObjectStatus();
putprop(prop, env, "Robots", set);
req = sb.profiles.size();
chk = sb.profiles.dbCacheChunkSize();
slt = sb.profiles.dbCacheFillStatus();
chk = sb.profiles.dbCacheNodeChunkSize();
slt = sb.profiles.dbCacheNodeFillStatus();
ost = sb.profiles.dbCacheObjectStatus();
putprop(prop, env, "Profiles", set);
prop.put("usedTotal", usedTotal / MB);
@@ -289,6 +302,10 @@ public class PerformanceMemory_p {
prop.put("slhig" + db, slt[1]);
prop.put("slmed" + db, slt[2]);
prop.put("sllow" + db, slt[3]);
prop.put("ochunkmax" + db, ost[0]);
prop.put("ochunkcur" + db, ost[1]);
prop.put("ohittmiss" + db, ost[5] + ":" + ost[6]);
prop.put("ouniqdoub" + db, ost[7] + ":" + ost[8]);
prop.put("used" + db, usd / KB);
prop.put("good" + db, god / KB);
prop.put("best" + db, bst / KB);

@@ -85,12 +85,16 @@ public class blogBoard {
return datbase.size();
}
public int[] dbCacheChunkSize() {
return datbase.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return datbase.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return datbase.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return datbase.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return datbase.cacheObjectStatus();
}
public void close() {

@@ -86,12 +86,16 @@ public class messageBoard {
return database.size();
}
public int[] dbCacheChunkSize() {
return database.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return database.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return database.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return database.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return database.cacheObjectStatus();
}
public void close() {

@@ -92,12 +92,12 @@ public final class userDB {
}
}
public int[] dbCacheChunkSize() {
return userTable.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return userTable.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return userTable.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return userTable.cacheNodeFillStatus();
}
void resetDatabase() {

@@ -100,9 +100,9 @@ public class wikiBoard {
return datbase.size();
}
public int[] dbCacheChunkSize() {
int[] db = datbase.cacheChunkSize();
int[] bk = bkpbase.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
int[] db = datbase.cacheNodeChunkSize();
int[] bk = bkpbase.cacheNodeChunkSize();
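// average the node chunk sizes of the main and the backup table per priority class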
int[] i = new int[3];
i[kelondroRecords.CP_LOW] = (db[kelondroRecords.CP_LOW] + bk[kelondroRecords.CP_LOW]) / 2;
i[kelondroRecords.CP_MEDIUM] = (db[kelondroRecords.CP_MEDIUM] + bk[kelondroRecords.CP_MEDIUM]) / 2;
@@ -110,12 +110,16 @@ public class wikiBoard {
return i;
}
public int[] dbCacheFillStatus() {
int[] a = datbase.cacheFillStatus();
int[] b = bkpbase.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
int[] a = datbase.cacheNodeFillStatus();
int[] b = bkpbase.cacheNodeFillStatus();
return new int[]{a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]};
}
public String[] dbCacheObjectStatus() {
return datbase.cacheObjectStatus();
}
public void close() {
try {datbase.close();} catch (IOException e) {}
try {bkpbase.close();} catch (IOException e) {}

@@ -133,12 +133,16 @@ public class kelondroMap {
return dyn.columnSize(0);
}
public int[] cacheChunkSize() {
return dyn.cacheChunkSize();
public int[] cacheNodeChunkSize() {
return dyn.cacheNodeChunkSize();
}
public int[] cacheFillStatus() {
return dyn.cacheFillStatus();
public int[] cacheNodeFillStatus() {
return dyn.cacheNodeFillStatus();
}
public String[] cacheObjectStatus() {
return dyn.cacheObjectStatus();
}
public synchronized void set(String key, Map newMap) throws IOException {

@@ -95,6 +95,10 @@ public class kelondroObjectCache {
this.maxSize = maxSize;
}
public int maxSize() {
return this.maxSize;
}
public void setMinMem(int minMem) {
this.minMem = minMem;
}
@@ -113,6 +117,41 @@ public class kelondroObjectCache {
return cache.size();
}
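// string snapshot of the cache state; index layout, inferred from the values
// below: 0 max size, 1 current size, 2 configured max age, 3 min entry age,
// 4 max entry age, 5 read hits, 6 read misses, 7 unique writes, 8 double writes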
public String[] status() {
return new String[]{
Integer.toString(maxSize()),
Integer.toString(size()),
Long.toString(this.maxAge),
Long.toString(minAge()),
Long.toString(maxAge()),
Integer.toString(readHit),
Integer.toString(readMiss),
Integer.toString(writeUnique),
Integer.toString(writeDouble)
};
}
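// merge two snapshots: sizes and hit/miss/write counters are summed, the age
// fields are combined with max/min/max so the result spans both caches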
private static String[] combinedStatus(String[] a, String[] b) {
return new String[]{
Integer.toString(Integer.parseInt(a[0]) + Integer.parseInt(b[0])),
Integer.toString(Integer.parseInt(a[1]) + Integer.parseInt(b[1])),
Long.toString(Math.max(Long.parseLong(a[2]), Long.parseLong(b[2]))),
Long.toString(Math.min(Long.parseLong(a[3]), Long.parseLong(b[3]))),
Long.toString(Math.max(Long.parseLong(a[4]), Long.parseLong(b[4]))),
Integer.toString(Integer.parseInt(a[5]) + Integer.parseInt(b[5])),
Integer.toString(Integer.parseInt(a[6]) + Integer.parseInt(b[6])),
Integer.toString(Integer.parseInt(a[7]) + Integer.parseInt(b[7])),
Integer.toString(Integer.parseInt(a[8]) + Integer.parseInt(b[8]))
};
}
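// fold the first l snapshots of a into one status array by pairwise merging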
public static String[] combinedStatus(String[][] a, int l) {
if ((a == null) || (a.length == 0) || (l == 0)) return null;
if ((a.length >= 1) && (l == 1)) return a[0];
if ((a.length >= 2) && (l == 2)) return combinedStatus(a[0], a[1]);
return combinedStatus(combinedStatus(a, l - 1), a[l - 1]);
}
private int intTime(long longTime) {
return (int) Math.max(0, ((longTime - startTime) / 1000));
}

@@ -427,11 +427,11 @@ public class kelondroRecords {
this.XcacheHeaders = null;
this.cacheScore = null;
} else {
if ((buffersize / cacheChunkSize(false)) > size()) {
this.XcacheSize = (int) (buffersize / cacheChunkSize(false));
if ((buffersize / cacheNodeChunkSize(false)) > size()) {
this.XcacheSize = (int) (buffersize / cacheNodeChunkSize(false));
this.cacheScore = null; // no cache control because we have more cache slots than database entries
} else {
this.XcacheSize = (int) (buffersize / cacheChunkSize(true));
this.XcacheSize = (int) (buffersize / cacheNodeChunkSize(true));
this.cacheScore = new kelondroMScoreCluster(); // cache control of CP_HIGH caches
}
this.XcacheHeaders = new HashMap[]{new HashMap(), new HashMap(), new HashMap()};
@@ -452,23 +452,23 @@ public class kelondroRecords {
return new File(filename);
}
protected final int cacheChunkSize(boolean cacheControl) {
protected final int cacheNodeChunkSize(boolean cacheControl) {
return this.headchunksize + element_in_cache + ((cacheControl) ? cache_control_entry : 0);
}
public int[] cacheChunkSize() {
public int[] cacheNodeChunkSize() {
// returns three integers:
// #0: chunk size of CP_LOW - priority entries
// #1: chunk size of CP_MEDIUM - priority entries
// #2: chunk size of CP_HIGH - priority entries
int[] i = new int[3];
i[CP_LOW] = cacheChunkSize(false);
i[CP_MEDIUM] = cacheChunkSize(false);
i[CP_HIGH] = cacheChunkSize(this.cacheScore != null);
i[CP_LOW] = cacheNodeChunkSize(false);
i[CP_MEDIUM] = cacheNodeChunkSize(false);
i[CP_HIGH] = cacheNodeChunkSize(this.cacheScore != null);
return i;
}
public int[] cacheFillStatus() {
public int[] cacheNodeFillStatus() {
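// returns {free slots, CP_HIGH fill, CP_MEDIUM fill, CP_LOW fill}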
if (XcacheHeaders == null) return new int[]{0,0,0,0};
return new int[]{XcacheSize - (XcacheHeaders[CP_HIGH].size() + XcacheHeaders[CP_MEDIUM].size() + XcacheHeaders[CP_LOW].size()), XcacheHeaders[CP_HIGH].size(), XcacheHeaders[CP_MEDIUM].size(), XcacheHeaders[CP_LOW].size()};
}

@@ -99,7 +99,7 @@ public final class kelondroStack extends kelondroRecords {
public static kelondroStack reset(kelondroStack stack) {
// memorize settings to this file
File f = new File(stack.filename);
long bz = stack.XcacheSize * stack.cacheChunkSize(true);
long bz = stack.XcacheSize * stack.cacheNodeChunkSize(true);
int[] cols = stack.COLWIDTHS;
// close and delete the file

@@ -122,7 +122,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
writeOrderType();
super.setLogger(log);
long objectbuffersize = objectCachePercent * buffersize / (nodeCachePercent + objectCachePercent);
long nodecachesize = objectbuffersize / (super.objectsize + 8 * columns.length);
long nodecachesize = objectbuffersize / cacheObjectChunkSize();
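// one cache slot per object chunk that fits into the buffer share; the
// constructor arguments appear to be (name, max size, max age, min memory)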
this.objectCache = new kelondroObjectCache(this.filename, (int) nodecachesize, nodecachesize * 300 , 4*1024*1024);
}
@@ -148,7 +148,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
writeOrderType();
super.setLogger(log);
long objectbuffersize = objectCachePercent * buffersize / (nodeCachePercent + objectCachePercent);
long nodecachesize = objectbuffersize / (super.objectsize + 8 * columns.length);
long nodecachesize = objectbuffersize / cacheObjectChunkSize();
this.objectCache = new kelondroObjectCache(this.filename, (int) nodecachesize, nodecachesize * 300 , 4*1024*1024);
}
@@ -158,7 +158,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
readOrderType();
super.setLogger(log);
long objectbuffersize = objectCachePercent * buffersize / (nodeCachePercent + objectCachePercent);
long nodecachesize = objectbuffersize / (super.objectsize + 8 * super.columns());
long nodecachesize = objectbuffersize / cacheObjectChunkSize();
this.objectCache = new kelondroObjectCache(this.filename, (int) nodecachesize, nodecachesize * 300 , 4*1024*1024);
}
@@ -168,10 +168,18 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
readOrderType();
super.setLogger(log);
long objectbuffersize = objectCachePercent * buffersize / (nodeCachePercent + objectCachePercent);
long nodecachesize = objectbuffersize / (super.objectsize + 8 * super.columns());
long nodecachesize = objectbuffersize / cacheObjectChunkSize();
this.objectCache = new kelondroObjectCache(this.filename, (int) nodecachesize, nodecachesize * 300 , 4*1024*1024);
}
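// estimated memory footprint of one cached object: its payload size plus an
// assumed 8 bytes of reference overhead per column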
public final int cacheObjectChunkSize() {
return super.objectsize + 8 * super.columns();
}
public String[] cacheObjectStatus() {
return this.objectCache.status();
}
private void writeOrderType() {
try {
super.setDescription(objectOrder.signature().getBytes());

@@ -77,14 +77,18 @@ public class plasmaCrawlProfile {
domsCache = new HashMap();
}
public int[] dbCacheChunkSize() {
return profileTable.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return profileTable.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return profileTable.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return profileTable.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return profileTable.cacheObjectStatus();
}
private void resetDatabase() {
// deletes the profile database and creates a new one
if (profileTable != null) try { profileTable.close(); } catch (IOException e) {}

@@ -85,14 +85,18 @@ public class plasmaCrawlRobotsTxt {
}
}
public int[] dbCacheChunkSize() {
return robotsTable.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return robotsTable.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return robotsTable.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return robotsTable.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return robotsTable.cacheObjectStatus();
}
private void resetDatabase() {
// deletes the robots.txt database and creates a new one
if (robotsTable != null) try {

@@ -202,13 +202,17 @@ public final class plasmaHTCache {
}
public int[] dbCacheChunkSize() {
return this.responseHeaderDB.cacheChunkSize();
return this.responseHeaderDB.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return this.responseHeaderDB.cacheFillStatus();
return this.responseHeaderDB.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return this.responseHeaderDB.cacheObjectStatus();
}
public void push(Entry entry) {
synchronized (this.cacheStack) {
this.cacheStack.add(entry);

@@ -133,6 +133,10 @@ public final class plasmaWordIndex {
return assortmentCluster.cacheFillStatusCml();
}
public String[] assortmentsCacheObjectStatus() {
return assortmentCluster.cacheObjectStatus();
}
public void setMaxWordCount(int maxWords) {
ramCache.setMaxWordCount(maxWords);
}

@@ -261,12 +261,16 @@ public final class plasmaWordIndexAssortment {
return assortments.size();
}
public int[] cacheChunkSize() {
return assortments.cacheChunkSize();
public int[] cacheNodeChunkSize() {
return assortments.cacheNodeChunkSize();
}
public int[] cacheFillStatus() {
return assortments.cacheFillStatus();
public int[] cacheNodeFillStatus() {
return assortments.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return assortments.cacheObjectStatus();
}
public void close() {

@@ -52,6 +52,7 @@ import java.util.HashSet;
import java.util.Iterator;
import de.anomic.kelondro.kelondroNaturalOrder;
import de.anomic.kelondro.kelondroObjectCache;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.kelondro.kelondroMergeIterator;
import de.anomic.server.logging.serverLog;
@@ -266,7 +267,7 @@ public final class plasmaWordIndexAssortmentCluster {
int[] i = new int[]{0, 0, 0};
int[] a = new int[3];
for (int j = 0; j < clusterCount; j++) {
a = assortments[j].cacheChunkSize();
a = assortments[j].cacheNodeChunkSize();
i[kelondroRecords.CP_LOW] += a[kelondroRecords.CP_LOW];
i[kelondroRecords.CP_MEDIUM] += a[kelondroRecords.CP_MEDIUM];
i[kelondroRecords.CP_HIGH] += a[kelondroRecords.CP_HIGH];
@@ -280,12 +281,18 @@ public final class plasmaWordIndexAssortmentCluster {
public int[] cacheFillStatusCml() {
int[] a, cml = new int[]{0, 0, 0, 0};
for (int i = 0; i < clusterCount; i++) {
a = assortments[i].cacheFillStatus();
a = assortments[i].cacheNodeFillStatus();
for (int j = 0; j < 4; j++) cml[j] += a[j];
}
return cml;
}
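// aggregate the object cache statistics of all assortment files into one combined status array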
public String[] cacheObjectStatus() {
String[][] a = new String[assortments.length][];
for (int i = 0; i < assortments.length; i++) a[i] = assortments[i].dbCacheObjectStatus();
return kelondroObjectCache.combinedStatus(a, a.length);
}
public void close() {
for (int i = 0; i < clusterCount; i++) assortments[i].close();
}

@@ -96,14 +96,18 @@ public class yacyNewsDB {
news = createDB(path, bufferkb);
}
public int[] dbCacheChunkSize() {
return news.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return news.cacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return news.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return news.cacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return news.cacheObjectStatus();
}
public void close() {
if (news != null) try {news.close();} catch (IOException e) {}
news = null;

@@ -106,12 +106,16 @@ public class yacyNewsPool {
return newsDB.size();
}
public int[] dbCacheChunkSize() {
return newsDB.dbCacheChunkSize();
public int[] dbCacheNodeChunkSize() {
return newsDB.dbCacheNodeChunkSize();
}
public int[] dbCacheFillStatus() {
return newsDB.dbCacheFillStatus();
public int[] dbCacheNodeFillStatus() {
return newsDB.dbCacheNodeFillStatus();
}
public String[] dbCacheObjectStatus() {
return newsDB.dbCacheObjectStatus();
}
public void publishMyNews(yacyNewsRecord record) throws IOException {

@@ -64,6 +64,7 @@ import de.anomic.kelondro.kelondroDyn;
import de.anomic.kelondro.kelondroException;
import de.anomic.kelondro.kelondroMScoreCluster;
import de.anomic.kelondro.kelondroMap;
import de.anomic.kelondro.kelondroObjectCache;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.server.serverCore;
@@ -169,10 +170,10 @@ public final class yacySeedDB {
} catch (IOException e) {}
}
public int[] dbCacheChunkSize() {
int[] ac = seedActiveDB.cacheChunkSize();
int[] pa = seedPassiveDB.cacheChunkSize();
int[] po = seedPotentialDB.cacheChunkSize();
public int[] dbCacheNodeChunkSize() {
int[] ac = seedActiveDB.cacheNodeChunkSize();
int[] pa = seedPassiveDB.cacheNodeChunkSize();
int[] po = seedPotentialDB.cacheNodeChunkSize();
int[] i = new int[3];
i[kelondroRecords.CP_LOW] = (ac[kelondroRecords.CP_LOW] + pa[kelondroRecords.CP_LOW] + po[kelondroRecords.CP_LOW]) / 3;
i[kelondroRecords.CP_MEDIUM] = (ac[kelondroRecords.CP_MEDIUM] + pa[kelondroRecords.CP_MEDIUM] + po[kelondroRecords.CP_MEDIUM]) / 3;
@@ -180,13 +181,20 @@ public final class yacySeedDB {
return i;
}
public int[] dbCacheFillStatus() {
int[] ac = seedActiveDB.cacheFillStatus();
int[] pa = seedPassiveDB.cacheFillStatus();
int[] po = seedPotentialDB.cacheFillStatus();
public int[] dbCacheNodeFillStatus() {
int[] ac = seedActiveDB.cacheNodeFillStatus();
int[] pa = seedPassiveDB.cacheNodeFillStatus();
int[] po = seedPotentialDB.cacheNodeFillStatus();
return new int[]{ac[0] + pa[0] + po[0], ac[1] + pa[1] + po[1], ac[2] + pa[2] + po[2], ac[3] + pa[3] + po[3]};
}
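// combined object cache statistics over the active, passive and potential seed tables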
public String[] dbCacheObjectStatus() {
return kelondroObjectCache.combinedStatus(new String[][] {
seedActiveDB.cacheObjectStatus(),
seedPassiveDB.cacheObjectStatus(),
seedPotentialDB.cacheObjectStatus() }, 3);
}
private synchronized kelondroMap openSeedTable(File seedDBFile) {
if (seedDBFile.exists()) try {
// open existing seed database
