enhanced memory allocation during database access:

- refactoring of kelondroRecords; this class is now divided into
  kelondroAbstractRecords, kelondroRecords, kelondroCachedRecords, kelondroHandle and kelondroNode
- better abstraction of kelondroNodes; such nodes may now be created by different classes
- a new Node defining class kelondroEcoRecords defines Nodes that do not need so much allocation and System.arraycopy
- there is less memory transfer on the bus, especially for collection index
- now half of memory needed for web index access


git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4024 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 18 years ago
parent 57a5b6fa71
commit 9628db6cdc

@ -50,8 +50,8 @@ import java.util.Map;
import de.anomic.http.httpHeader;
import de.anomic.kelondro.kelondroCache;
import de.anomic.kelondro.kelondroCachedRecords;
import de.anomic.kelondro.kelondroFlexTable;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.server.serverDomains;
import de.anomic.server.serverFileUtils;
@ -136,12 +136,12 @@ public class PerformanceMemory_p {
prop.put("TableIndexTotalMem", totalmem / (1024 * 1024));
// write node cache table
i = kelondroRecords.filenames();
i = kelondroCachedRecords.filenames();
c = 0;
totalmem = 0;
while (i.hasNext()) {
filename = (String) i.next();
map = (Map) kelondroRecords.memoryStats(filename);
map = (Map) kelondroCachedRecords.memoryStats(filename);
mem = Long.parseLong((String) map.get("nodeCacheMem"));
totalmem += mem;
prop.put("NodeList_" + c + "_nodeCachePath", ((p = filename.indexOf("DATA")) < 0) ? filename : filename.substring(p));
@ -157,8 +157,8 @@ public class PerformanceMemory_p {
c++;
}
prop.put("NodeList", c);
prop.put("nodeCacheStopGrow", kelondroRecords.getMemStopGrow() / (1024 * 1024));
prop.put("nodeCacheStartShrink", kelondroRecords.getMemStartShrink() / (1024 * 1024));
prop.put("nodeCacheStopGrow", kelondroCachedRecords.getMemStopGrow() / (1024 * 1024));
prop.put("nodeCacheStartShrink", kelondroCachedRecords.getMemStartShrink() / (1024 * 1024));
prop.put("nodeCacheTotalMem", totalmem / (1024 * 1024));
// write object cache table

@ -1,6 +1,6 @@
// indexRAMRI.java
// (C) 2005, 2006 by Michael Peter Christen; mc@anomic.de, Frankfurt a. M., Germany
// first published 2005 on http://www.anomic.de
// (C) 2005, 2006 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2005 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//

@ -45,8 +45,6 @@
package de.anomic.kelondro;
import de.anomic.kelondro.kelondroRecords.Node;
public abstract class kelondroAbstractOrder implements kelondroOrder {
protected byte[] zero = null;
@ -66,8 +64,8 @@ public abstract class kelondroAbstractOrder implements kelondroOrder {
public int compare(Object a, Object b) {
if ((a instanceof byte[]) && (b instanceof byte[])) {
return compare((byte[]) a, (byte[]) b);
} else if ((a instanceof Node) && (b instanceof Node)) {
return compare(((Node) a).getKey(), ((Node) b).getKey());
} else if ((a instanceof kelondroNode) && (b instanceof kelondroNode)) {
return compare(((kelondroNode) a).getKey(), ((kelondroNode) b).getKey());
} else if ((a instanceof String) && (b instanceof String)) {
return compare(((String) a).getBytes(), ((String) b).getBytes());
} else if ((a instanceof kelondroRow.Entry) && (b instanceof kelondroRow.Entry)) {

File diff suppressed because it is too large Load Diff

@ -83,7 +83,7 @@ public final class kelondroBufferedIOChunks extends kelondroAbstractIOChunks imp
// check commit time
if ((bufferCurrSize > bufferMaxSize) ||
(this.lastCommit + this.commitTimeout < System.currentTimeMillis())) {
(this.lastCommit + this.commitTimeout > System.currentTimeMillis())) {
commit();
this.lastCommit = System.currentTimeMillis();
}
@ -120,12 +120,12 @@ public final class kelondroBufferedIOChunks extends kelondroAbstractIOChunks imp
System.arraycopy(b, off, bb, 0, len);
synchronized (buffer) {
buffer.put(new Long(pos + off), bb);
bufferCurrSize += overhead + pos + off;
bufferCurrSize += overhead + len;
}
// check commit time
if ((bufferCurrSize > bufferMaxSize) ||
(this.lastCommit + this.commitTimeout < System.currentTimeMillis())) {
(this.lastCommit + this.commitTimeout > System.currentTimeMillis())) {
commit();
this.lastCommit = System.currentTimeMillis();
}

@ -161,7 +161,7 @@ public class kelondroCache implements kelondroIndex {
}
private int cacheGrowStatus() {
return kelondroRecords.cacheGrowStatus(serverMemory.available(), memStopGrow, memStartShrink);
return kelondroCachedRecords.cacheGrowStatus(serverMemory.available(), memStopGrow, memStartShrink);
}
private void flushUnique() throws IOException {

@ -0,0 +1,638 @@
// kelondroCachedRecords.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import de.anomic.server.serverMemory;
/**
 * kelondroCachedRecords extends the plain record file with an in-memory cache of
 * record head chunks (overhead bytes + key). Instances that use the node cache are
 * registered in a static record tracker so global memory statistics can be reported
 * per file (see filenames() and memoryStats(String)).
 */
public class kelondroCachedRecords extends kelondroAbstractRecords implements kelondroRecords {
// memory calculation
private static final int element_in_cache = 4; // per-entry bookkeeping overhead in bytes; for kelondroCollectionObjectMap: 4; for HashMap: 52
// static supervision objects: recognize and coordinate all activities
private static TreeMap recordTracker = new TreeMap(); // a String/filename - kelondroTray mapping
private static long memStopGrow = 10000000; // a limit for the node cache to stop growing if less than this memory amount is available
private static long memStartShrink = 6000000; // a limit for the node cache to start with shrinking if less than this memory amount is available
// caching buffer
private kelondroIntBytesMap cacheHeaders; // the cache; holds overhead values and key element; null when caching is disabled
private int readHit, readMiss, writeUnique, writeDouble, cacheDelete, cacheFlush; // cache usage counters reported by memoryStats()
// opens (or creates) a cached record file backed by a File; when useNodeCache is
// true the instance is registered in the global record tracker
public kelondroCachedRecords(
File file, boolean useNodeCache, long preloadTime,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth) throws IOException {
super(file, useNodeCache, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth);
initCache(useNodeCache, preloadTime);
if (useNodeCache) recordTracker.put(this.filename, this);
}
// creates a cached record file on top of an abstract random-access source
public kelondroCachedRecords(
kelondroRA ra, String filename, boolean useNodeCache, long preloadTime,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth,
boolean exitOnFail) {
super(ra, filename, useNodeCache, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth, exitOnFail);
initCache(useNodeCache, preloadTime);
if (useNodeCache) recordTracker.put(this.filename, this);
}
// opens an existing record file from a random-access source; the file layout
// (row definition, overhead sizes) is read from the file itself by the superclass
public kelondroCachedRecords(
kelondroRA ra, String filename, boolean useNodeCache, long preloadTime) throws IOException{
super(ra, filename, useNodeCache);
initCache(useNodeCache, preloadTime);
if (useNodeCache) recordTracker.put(this.filename, this);
}
// initializes the head-chunk cache and, if preloadTime > 0, fills it with
// records read from the file until either the time budget is spent or the
// memory situation (cacheGrowStatus) forbids further growth
private void initCache(boolean useNodeCache, long preloadTime) {
if (useNodeCache) {
this.cacheHeaders = new kelondroIntBytesMap(this.headchunksize, 0);
} else {
this.cacheHeaders = null;
}
// reset all cache usage counters
this.readHit = 0;
this.readMiss = 0;
this.writeUnique = 0;
this.writeDouble = 0;
this.cacheDelete = 0;
this.cacheFlush = 0;
// pre-load node cache
if ((preloadTime > 0) && (useNodeCache)) {
long stop = System.currentTimeMillis() + preloadTime;
int count = 0;
try {
Iterator i = contentNodes(preloadTime);
CacheNode n;
// stop preloading as soon as the time budget is exhausted or memory gets tight
while ((System.currentTimeMillis() < stop) && (cacheGrowStatus() == 2) && (i.hasNext())) {
n = (CacheNode) i.next();
cacheHeaders.addb(n.handle().index, n.headChunk);
count++;
}
cacheHeaders.flush();
logFine("preloaded " + count + " records into cache");
} catch (kelondroException e) {
// the contentNodes iterator had a time-out; we don't do a preload
logFine("could not preload records: " + e.getMessage());
}
}
}
// instance-level grow status: additionally forbids growth (returns 0) when the
// cache's own next growth step would not fit into the currently available memory
private int cacheGrowStatus() {
long available = serverMemory.available();
if ((cacheHeaders != null) && (available < cacheHeaders.memoryNeededForGrow())) return 0;
return cacheGrowStatus(available, memStopGrow, memStartShrink);
}
/**
 * Classifies the available memory into one of three cache policies:
 * 2 = cache is allowed to grow and must not shrink (plenty of memory),
 * 1 = cache is allowed to grow, but need not to shrink,
 * 0 = cache is not allowed to grow, but shall shrink.
 */
public static final int cacheGrowStatus(long available, long stopGrow, long startShrink) {
    if (available <= startShrink) return 0;
    return (available <= stopGrow) ? 1 : 2;
}
// sets the global memory thresholds that control node-cache growth/shrink
// behavior for all cached record files
public static void setCacheGrowStati(long memStopGrowNew, long memStartShrinkNew) {
memStopGrow = memStopGrowNew;
memStartShrink = memStartShrinkNew;
}
// returns the memory threshold below which caches stop growing
public static long getMemStopGrow() {
return memStopGrow ;
}
// returns the memory threshold below which caches start shrinking
public static long getMemStartShrink() {
return memStartShrink ;
}
public static final Iterator filenames() {
// iterates string objects; all file names from record tracker
return recordTracker.keySet().iterator();
}
public static final Map memoryStats(String filename) {
// returns a map for each file in the tracker;
// the map represents properties for each record object,
// i.e. for cache memory allocation
// NOTE(review): throws NullPointerException if filename is not tracked — callers
// are expected to pass names obtained from filenames()
kelondroCachedRecords theRecord = (kelondroCachedRecords) recordTracker.get(filename);
return theRecord.memoryStats();
}
private final Map memoryStats() {
// returns statistical data about this object;
// returns null when the node cache is disabled
if (cacheHeaders == null) return null;
HashMap map = new HashMap();
map.put("nodeChunkSize", Integer.toString(this.headchunksize + element_in_cache));
map.put("nodeCacheCount", Integer.toString(cacheHeaders.size()));
map.put("nodeCacheMem", Integer.toString(cacheHeaders.size() * (this.headchunksize + element_in_cache)));
map.put("nodeCacheReadHit", Integer.toString(readHit));
map.put("nodeCacheReadMiss", Integer.toString(readMiss));
map.put("nodeCacheWriteUnique", Integer.toString(writeUnique));
map.put("nodeCacheWriteDouble", Integer.toString(writeDouble));
map.put("nodeCacheDeletes", Integer.toString(cacheDelete));
map.put("nodeCacheFlushes", Integer.toString(cacheFlush));
return map;
}
// deletes a node from the file and, if a cache entry exists for its handle,
// removes that entry from the head-chunk cache as well
protected synchronized void deleteNode(kelondroHandle handle) throws IOException {
if (cacheHeaders == null) {
super.deleteNode(handle);
} else synchronized (cacheHeaders) {
if (cacheHeaders.size() == 0) {
// cache is empty, nothing to evict
super.deleteNode(handle);
} else {
cacheHeaders.removeb(handle.index);
cacheDelete++;
super.deleteNode(handle);
}
}
}
// debug helper: dumps either the raw head chunks from the file (when caching is
// off) or the contents of the head-chunk cache to stdout
protected void printCache() {
if (cacheHeaders == null) {
System.out.println("### file report: " + size() + " entries");
for (int i = 0; i < USAGE.allCount(); i++) {
// print from file to compare
System.out.print("#F " + i + ": ");
try {
for (int j = 0; j < headchunksize; j++)
System.out.print(Integer.toHexString(0xff & entryFile.readByte(j + seekpos(new kelondroHandle(i)))) + " ");
} catch (IOException e) {}
System.out.println();
}
} else {
System.out.println("### cache report: " + cacheHeaders.size() + " entries");
Iterator i = cacheHeaders.rows();
kelondroRow.Entry entry;
while (i.hasNext()) {
entry = (kelondroRow.Entry) i.next();
// print from cache
System.out.print("#C ");
printChunk(entry);
System.out.println();
// print from file to compare
/*
System.out.print("#F " + cp + " " + ((Handle) entry.getKey()).index + ": ");
try {
for (int j = 0; j < headchunksize; j++)
System.out.print(entryFile.readByte(j + seekpos((Handle) entry.getKey())) + ",");
} catch (IOException e) {}
*/
System.out.println();
}
}
System.out.println("### end report");
}
// closes the file and unregisters this instance from the static record tracker;
// cached instances (cacheHeaders != null) must be tracked, uncached ones must not —
// any mismatch is reported as severe but close proceeds anyway
public synchronized void close() {
if (cacheHeaders == null) {
if (recordTracker.get(this.filename) != null) {
theLogger.severe("close(): file '" + this.filename + "' was tracked with record tracker, but it should not.");
}
} else {
if (recordTracker.remove(this.filename) == null) {
theLogger.severe("close(): file '" + this.filename + "' was not tracked with record tracker.");
}
}
super.close();
// drop the cache so its memory can be reclaimed; further cache use is invalid
this.cacheHeaders = null;
}
// returns two profiles: [0] the cache profile (empty profile when caching is
// disabled), [1] the entry-file I/O profile
public kelondroProfile[] profiles() {
return new kelondroProfile[]{
(cacheHeaders == null) ? new kelondroProfile() :
cacheHeaders.profile(),
entryFile.profile()
};
}
// returns the cache and file profiles merged into a single profile
public kelondroProfile profile() {
return kelondroProfile.consolidate(profiles());
}
// debug helper: prints the superclass report, the cache contents and all nodes
public void print() throws IOException {
super.print();
// print also all records
System.out.println("CACHE");
printCache();
System.out.println("--");
System.out.println("NODES");
Iterator i = new contentNodeIterator(-1);
kelondroNode n;
while (i.hasNext()) {
n = (kelondroNode) i.next();
System.out.println("NODE: " + n.toString());
}
}
// node factory: this implementation produces cache-aware CacheNode objects
public kelondroNode newNode(kelondroHandle handle, byte[] bulk, int offset) throws IOException {
return new CacheNode(handle, bulk, offset);
}
public final class CacheNode implements kelondroNode {
// an Node holds all information of one row of data. This includes the key to the entry
// which is stored as entry element at position 0
// an Node object can be created in two ways:
// 1. instantiation with an index number. After creation the Object does not hold any
// value information until such is retrieved using the getValue() method
// 2. instantiation with a value array. the values are not directly written into the
// file. Expanding the tree structure is then done using the save() method. at any
// time it is possible to verify the save state using the saved() predicate.
// Therefore an entry object has three modes:
// a: holding an index information only (saved() = true)
// b: holding value information only (saved() = false)
// c: holding index and value information at the same time (saved() = true)
// which can be the result of one of the two processes as follow:
// (i) created with index and after using the getValue() method, or
// (ii) created with values and after calling the save() method
// the method will therefore throw an IllegalStateException when the following
// process step is performed:
// - create the Node with index and call then the save() method
// this case can be decided with
// ((index != NUL) && (values == null))
// The save() method represents the insert function for the tree. Balancing functions
// are applied automatically. While balancing, the Node does never change its index key,
// but its parent/child keys.
//private byte[] ohBytes = null; // the overhead bytes, OHBYTEC values
//private Handle[] ohHandle= null; // the overhead handles, OHHANDLEC values
//private byte[][] values = null; // an array of byte[] nodes is the value vector
private kelondroHandle handle = null; // index of the entry, by default NUL means undefined
private byte[] headChunk = null; // contains ohBytes, ohHandles and the key value
private byte[] tailChunk = null; // contains all values except the key value
private boolean headChanged = false; // true when headChunk must still be written to the file
private boolean tailChanged = false; // true when tailChunk must still be written to the file
public CacheNode(byte[] rowinstance) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays;
// it allocates a new record in the file for the given row (or an empty
// record when rowinstance is null) and fills the head-chunk cache
assert ((rowinstance == null) || (rowinstance.length == ROW.objectsize)) : "bulkchunk.length = " + rowinstance.length + ", ROW.width(0) = " + ROW.width(0);
this.handle = new kelondroHandle(USAGE.allocatePayload(rowinstance));
// create empty chunks
this.headChunk = new byte[headchunksize];
this.tailChunk = new byte[tailchunksize];
// write content to chunks
if (rowinstance == null) {
// no payload: mark head and tail with 0xff filler bytes
for (int i = headchunksize - 1; i >= 0; i--) this.headChunk[i] = (byte) 0xff;
for (int i = tailchunksize - 1; i >= 0; i--) this.tailChunk[i] = (byte) 0xff;
} else {
// fill overhead area with 0xff, then split the row into key (head) and values (tail)
for (int i = overhead - 1; i >= 0; i--) this.headChunk[i] = (byte) 0xff;
System.arraycopy(rowinstance, 0, this.headChunk, overhead, ROW.width(0));
System.arraycopy(rowinstance, ROW.width(0), this.tailChunk, 0, tailchunksize);
}
if (cacheHeaders != null) synchronized (cacheHeaders) {
updateNodeCache();
}
// mark chunks as changed
// if the head/tail chunks come from a file system read, setChanged should be false
// if the chunks come from a overwrite attempt, it should be true
this.headChanged = false; // we wrote the head already during allocate
this.tailChanged = false; // we write the tail already during allocate
}
public CacheNode(kelondroHandle handle, byte[] bulkchunk, int offset) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
// if write is true, then the chunk in bulkchunk is written to the file
// othervise it is considered equal to what is stored in the file
// (that is ensured during pre-loaded enumeration)
this.handle = handle;
boolean changed;
if (handle.index >= USAGE.allCount()) {
// this causes only a write action if we create a node beyond the end of the file
USAGE.allocateRecord(handle.index, bulkchunk, offset);
changed = false; // we have already wrote the record, so it is considered as unchanged
} else {
changed = true;
}
assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = " + bulkchunk.length + ", offset = " + offset + ", recordsize = " + recordsize;
// create empty chunks
this.headChunk = new byte[headchunksize];
this.tailChunk = new byte[tailchunksize];
// write content to chunks
if (bulkchunk != null) {
System.arraycopy(bulkchunk, offset, this.headChunk, 0, headchunksize);
System.arraycopy(bulkchunk, offset + headchunksize, this.tailChunk, 0, tailchunksize);
}
// mark chunks as changed
this.headChanged = changed;
this.tailChanged = changed;
}
// convenience form of the constructor below without an auto-fix parent node
public CacheNode(kelondroHandle handle, boolean fillTail) throws IOException {
this(handle, null, 0, fillTail);
}
public CacheNode(kelondroHandle handle, CacheNode parentNode, int referenceInParent, boolean fillTail) throws IOException {
// this creates an entry with an pre-reserved entry position.
// values can be written using the setValues() method,
// but we expect that values are already there in the file.
assert (handle != null): "node handle is null";
assert (handle.index >= 0): "node handle too low: " + handle.index;
//assert (handle.index < USAGE.allCount()) : "node handle too high: " + handle.index + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;
// the parentNode can be given if an auto-fix in the following case is wanted
if (handle == null) throw new kelondroException(filename, "INTERNAL ERROR: node handle is null.");
if (handle.index >= USAGE.allCount()) {
// the handle points beyond the end of the file: either fail, or clear the
// reference in the parent node (auto-fix) when a parent was supplied
if (parentNode == null) throw new kelondroException(filename, "INTERNAL ERROR, Node/init: node handle index " + handle.index + " exceeds size. No auto-fix node was submitted. This is a serious failure.");
try {
parentNode.setOHHandle(referenceInParent, null);
parentNode.commit();
logWarning("INTERNAL ERROR, Node/init in " + filename + ": node handle index " + handle.index + " exceeds size. The bad node has been auto-fixed");
} catch (IOException ee) {
throw new kelondroException(filename, "INTERNAL ERROR, Node/init: node handle index " + handle.index + " exceeds size. It was tried to fix the bad node, but failed with an IOException: " + ee.getMessage());
}
}
// use given handle
this.handle = new kelondroHandle(handle.index);
// check for memory availability when fillTail is requested
if ((fillTail) && (tailchunksize > 10000)) fillTail = false; // this is a fail-safe 'short version' of a memory check
// init the content
// create chunks; read them from file or cache
this.tailChunk = null;
if (cacheHeaders == null) {
// caching disabled: always read from the file
if (fillTail) {
// read complete record
byte[] chunkbuffer = new byte[recordsize];
entryFile.readFully(seekpos(this.handle), chunkbuffer, 0, recordsize);
this.headChunk = new byte[headchunksize];
this.tailChunk = new byte[tailchunksize];
System.arraycopy(chunkbuffer, 0, this.headChunk, 0, headchunksize);
System.arraycopy(chunkbuffer, headchunksize, this.tailChunk, 0, tailchunksize);
chunkbuffer = null;
} else {
// read overhead and key
this.headChunk = new byte[headchunksize];
this.tailChunk = null;
entryFile.readFully(seekpos(this.handle), this.headChunk, 0, headchunksize);
}
} else synchronized(cacheHeaders) {
// caching enabled: try the head-chunk cache first
byte[] cacheEntry = null;
cacheEntry = cacheHeaders.getb(this.handle.index);
if (cacheEntry == null) {
// cache miss, we read overhead and key from file
readMiss++;
if (fillTail) {
// read complete record
byte[] chunkbuffer = new byte[recordsize];
entryFile.readFully(seekpos(this.handle), chunkbuffer, 0, recordsize);
this.headChunk = new byte[headchunksize];
this.tailChunk = new byte[tailchunksize];
System.arraycopy(chunkbuffer, 0, this.headChunk, 0, headchunksize);
System.arraycopy(chunkbuffer, headchunksize, this.tailChunk, 0, tailchunksize);
chunkbuffer = null;
} else {
// read overhead and key
this.headChunk = new byte[headchunksize];
this.tailChunk = null;
entryFile.readFully(seekpos(this.handle), this.headChunk, 0, headchunksize);
}
// if space left in cache, copy these value to the cache
updateNodeCache();
} else {
// cache hit: use the cached head chunk directly (note: shared reference,
// not a copy — the cache and this node share the same byte array)
readHit++;
this.headChunk = cacheEntry;
}
}
}
// copies 'valuewidth' bytes of 'value' (starting at 'valueoffset') into
// 'targetarray' at 'targetoffset'; a null value writes zeros, and a value
// shorter than the field width is zero-padded at the end
private void setValue(byte[] value, int valueoffset, int valuewidth, byte[] targetarray, int targetoffset) {
if (value == null) {
while (valuewidth-- > 0) targetarray[targetoffset++] = 0;
} else {
// NOTE(review): these asserts require valueoffset + valuewidth <= value.length,
// which contradicts the zero-padding path below that handles shorter values —
// confirm which contract callers actually rely on
assert ((valueoffset >= 0) && (valueoffset < value.length)) : "valueoffset = " + valueoffset;
assert ((valueoffset + valuewidth <= value.length)) : "valueoffset = " + valueoffset + ", valuewidth = " + valuewidth + ", value.length = " + value.length;
assert ((targetoffset >= 0) && (targetoffset < targetarray.length)) : "targetoffset = " + targetoffset;
assert ((targetoffset + valuewidth <= targetarray.length)) : "targetoffset = " + targetoffset + ", valuewidth = " + valuewidth + ", targetarray.length = " + targetarray.length;
System.arraycopy(value, valueoffset, targetarray, targetoffset, Math.min(value.length, valuewidth)); // error?
// zero-fill the remainder of the field when the value is shorter than valuewidth
while (valuewidth-- > value.length) targetarray[targetoffset + valuewidth] = 0;
}
}
public kelondroHandle handle() {
// if this entry has an index, return it
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "the entry has no index assigned");
return this.handle;
}
// writes overhead byte i into the head chunk and marks the head as changed
public void setOHByte(int i, byte b) {
if (i >= OHBYTEC) throw new IllegalArgumentException("setOHByte: wrong index " + i);
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "setOHByte: no handle assigned");
this.headChunk[i] = b;
this.headChanged = true;
}
// writes overhead handle i into the head chunk (NUL when otherhandle is null)
// and marks the head as changed
public void setOHHandle(int i, kelondroHandle otherhandle) {
assert (i < OHHANDLEC): "setOHHandle: wrong array size " + i;
assert (this.handle.index != kelondroHandle.NUL): "setOHHandle: no handle assigned ind file" + filename;
if (otherhandle == null) {
NUL2bytes(this.headChunk, OHBYTEC + 4 * i);
} else {
if (otherhandle.index >= USAGE.allCount()) throw new kelondroException(filename, "INTERNAL ERROR, setOHHandles: handle " + i + " exceeds file size (" + handle.index + " >= " + USAGE.allCount() + ")");
int2bytes(otherhandle.index, this.headChunk, OHBYTEC + 4 * i);
}
this.headChanged = true;
}
// reads overhead byte i from the head chunk
public byte getOHByte(int i) {
if (i >= OHBYTEC) throw new IllegalArgumentException("getOHByte: wrong index " + i);
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
return this.headChunk[i];
}
// reads overhead handle i from the head chunk; returns null for a NUL handle
public kelondroHandle getOHHandle(int i) {
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
assert (i < OHHANDLEC): "handle index out of bounds: " + i + " in file " + filename;
int h = bytes2int(this.headChunk, OHBYTEC + 4 * i);
return (h == kelondroHandle.NUL) ? null : new kelondroHandle(h);
}
// stores a complete row into this node: key into the head chunk, remaining
// columns into the tail chunk; the file itself is only updated on commit()
public synchronized void setValueRow(byte[] row) throws IOException {
// if the index is defined, then write values directly to the file, else only to the object
if ((row != null) && (row.length != ROW.objectsize())) throw new IOException("setValueRow with wrong (" + row.length + ") row length instead correct: " + ROW.objectsize());
// set values
if (this.handle.index != kelondroHandle.NUL) {
setValue(row, 0, ROW.width(0), headChunk, overhead);
if (ROW.columns() > 1) setValue(row, ROW.width(0), tailchunksize, tailChunk, 0);
}
this.headChanged = true;
this.tailChanged = true;
}
public synchronized boolean valid() {
// returns true if the key starts with non-zero byte
// this may help to detect deleted entries
return (headChunk[overhead] != 0) && ((headChunk[overhead] != -128) || (headChunk[overhead + 1] != 0));
}
public synchronized byte[] getKey() {
// read key from the head chunk (stored directly after the overhead area)
return trimCopy(headChunk, overhead, ROW.width(0));
}
// assembles and returns the full row; lazily loads the tail chunk from the
// file if it has not been read yet
public synchronized byte[] getValueRow() throws IOException {
if (this.tailChunk == null) {
// load all values from the database file
this.tailChunk = new byte[tailchunksize];
// read values
entryFile.readFully(seekpos(this.handle) + (long) headchunksize, this.tailChunk, 0, this.tailChunk.length);
}
// create return value
byte[] row = new byte[ROW.objectsize()];
// read key
System.arraycopy(headChunk, overhead, row, 0, ROW.width(0));
// read remaining values
System.arraycopy(tailChunk, 0, row, ROW.width(0), tailchunksize);
return row;
}
public synchronized void commit() throws IOException {
// this must be called after all write operations to the node are
// finished
// place the data to the file
if (this.headChunk == null) {
// there is nothing to save
throw new kelondroException(filename, "no values to save (header missing)");
}
boolean doCommit = this.headChanged || this.tailChanged;
// save head
synchronized (entryFile) {
if (this.headChanged) {
//System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
assert (headChunk == null) || (headChunk.length == headchunksize);
entryFile.write(seekpos(this.handle), (this.headChunk == null) ? new byte[headchunksize] : this.headChunk);
// keep the head-chunk cache in sync with what was just written
updateNodeCache();
this.headChanged = false;
}
// save tail
if ((this.tailChunk != null) && (this.tailChanged)) {
//System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
assert (tailChunk == null) || (tailChunk.length == tailchunksize);
entryFile.write(seekpos(this.handle) + headchunksize, (this.tailChunk == null) ? new byte[tailchunksize] : this.tailChunk);
this.tailChanged = false;
}
// flush the underlying file only if something was actually dirty
if (doCommit) entryFile.commit();
}
}
// debug representation: hex handle index, overhead bytes/handles and all
// column values; load errors are embedded in the returned string
public String toString() {
if (this.handle.index == kelondroHandle.NUL) return "NULL";
String s = Integer.toHexString(this.handle.index);
kelondroHandle h;
// left-pad the hex index to 4 characters
while (s.length() < 4) s = "0" + s;
try {
for (int i = 0; i < OHBYTEC; i++) s = s + ":b" + getOHByte(i);
for (int i = 0; i < OHHANDLEC; i++) {
h = getOHHandle(i);
if (h == null) s = s + ":hNULL"; else s = s + ":h" + h.toString();
}
kelondroRow.Entry content = row().newEntry(getValueRow());
for (int i = 0; i < row().columns(); i++) s = s + ":" + ((content.empty(i)) ? "NULL" : content.getColString(i, "UTF-8").trim());
} catch (IOException e) {
s = s + ":***LOAD ERROR***:" + e.getMessage();
}
return s;
}
private boolean cacheSpace() {
// check for space in cache
// should be only called within a synchronized(cacheHeaders) environment
// returns true if it is allowed to add another entry to the cache
// returns false if the cache is considered to be full
if (cacheHeaders == null) return false; // no caching
if (cacheHeaders.size() == 0) return true; // nothing there to flush
if (cacheGrowStatus() == 2) return true; // no need to flush cache space
// just delete any of the entries
if (cacheGrowStatus() <= 1) synchronized (cacheHeaders) {
cacheHeaders.removeoneb();
cacheFlush++;
}
return cacheGrowStatus() > 0;
}
// stores this node's head chunk in the cache when space permits; otherwise
// removes any stale cache entry for this handle
private void updateNodeCache() {
if (this.handle == null) return; // wrong access
if (this.headChunk == null) return; // nothing there to cache
if (cacheHeaders == null) return; // we do not use the cache
if (cacheSpace()) synchronized (cacheHeaders) {
// generate cache entry
//byte[] cacheEntry = new byte[headchunksize];
//System.arraycopy(headChunk, 0, cacheEntry, 0, headchunksize);
// store the cache entry
boolean upd = false;
upd = (cacheHeaders.putb(this.handle.index, headChunk) != null);
if (upd) writeDouble++; else writeUnique++;
//System.out.println("kelondroRecords cache4" + filename + ": cache record size = " + (memBefore - Runtime.getRuntime().freeMemory()) + " bytes" + ((newentry) ? " new" : ""));
//printCache();
} else {
// there shall be no entry in the cache. If one exists, we remove it
// NOTE(review): this branch touches cacheHeaders without holding its lock,
// unlike the branch above — confirm whether that is intentional
boolean rem = false;
rem = (cacheHeaders.removeb(this.handle.index) != null);
if (rem) cacheDelete++;
}
}
}
}

@ -0,0 +1,292 @@
// kelondroEcoRecords.java
// (C) 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 03.07.2007 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;
/**
 * kelondroEcoRecords is a record-file implementation whose nodes avoid the
 * head/tail chunk splitting of kelondroCachedRecords and therefore need less
 * allocation and copying. Every instance is registered in a static record
 * tracker keyed by file name.
 */
public class kelondroEcoRecords extends kelondroAbstractRecords {
// static supervision objects: recognize and coordinate all activities
private static TreeMap recordTracker = new TreeMap(); // a String/filename - kelondroTray mapping
// opens (or creates) a record file backed by a File
public kelondroEcoRecords(
File file,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth) throws IOException {
super(file, true, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth);
recordTracker.put(this.filename, this);
}
// creates a record file on top of an abstract random-access source
public kelondroEcoRecords(
kelondroRA ra, String filename,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth,
boolean exitOnFail) {
super(ra, filename, true, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth, exitOnFail);
recordTracker.put(this.filename, this);
}
// opens an existing record file; the layout is read from the file by the superclass
public kelondroEcoRecords(
kelondroRA ra, String filename) throws IOException{
super(ra, filename, true);
recordTracker.put(this.filename, this);
}
public static final Iterator filenames() {
// iterates string objects; all file names from record tracker
return recordTracker.keySet().iterator();
}
// no node cache here, so deletion is delegated directly to the superclass
protected synchronized void deleteNode(kelondroHandle handle) throws IOException {
super.deleteNode(handle);
}
/**
 * Closes the record file and unregisters this instance from the static record
 * tracker. Every constructor puts the instance into the tracker, so a missing
 * entry here indicates an inconsistency (e.g. a double close) and is reported
 * as severe; close proceeds regardless.
 */
public synchronized void close() {
    // remove (not just inspect) the tracker entry; the previous code only called
    // recordTracker.get(), which (a) always logged SEVERE because constructors
    // always register the file, and (b) leaked the closed instance in the static map
    if (recordTracker.remove(this.filename) == null) {
        theLogger.severe("close(): file '" + this.filename + "' was not tracked with record tracker.");
    }
    super.close();
}
// node factory: this implementation produces low-overhead EcoNode objects
public kelondroNode newNode(kelondroHandle handle, byte[] bulk, int offset) throws IOException {
return new EcoNode(handle, bulk, offset);
}
public final class EcoNode implements kelondroNode {
private kelondroHandle handle = null; // index of the entry, by default NUL means undefined
private byte[] ohChunk = null; // contains overhead values
private byte[] bodyChunk = null; // contains all row values
private boolean ohChanged = false;
private boolean bodyChanged = false;
public EcoNode(byte[] rowinstance) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
assert ((rowinstance == null) || (rowinstance.length == ROW.objectsize)) : "bulkchunk.length = " + rowinstance.length + ", ROW.width(0) = " + ROW.width(0);
this.handle = new kelondroHandle(USAGE.allocatePayload(rowinstance));
// create chunks
this.ohChunk = new byte[overhead];
for (int i = this.ohChunk.length - 1; i >= 0; i--) this.ohChunk[i] = (byte) 0xff;
if (rowinstance == null) {
this.bodyChunk = new byte[ROW.objectsize()];
for (int i = this.bodyChunk.length - 1; i >= 0; i--) this.bodyChunk[i] = (byte) 0xff;
} else {
this.bodyChunk = rowinstance;
}
// mark chunks as not changed
this.ohChanged = false;
this.bodyChanged = false;
}
public EcoNode(kelondroHandle handle, byte[] bulkchunk, int offset) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
// if write is true, then the chunk in bulkchunk is written to the file
// othervise it is considered equal to what is stored in the file
// (that is ensured during pre-loaded enumeration)
this.handle = handle;
boolean changed;
if (handle.index >= USAGE.allCount()) {
// this causes only a write action if we create a node beyond the end of the file
USAGE.allocateRecord(handle.index, bulkchunk, offset);
changed = false; // we have already wrote the record, so it is considered as unchanged
} else {
changed = true;
}
assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = " + bulkchunk.length + ", offset = " + offset + ", recordsize = " + recordsize;
if ((offset == 0) && (overhead == 0) && ((bulkchunk == null) || (bulkchunk.length == ROW.objectsize()))) {
this.ohChunk = new byte[0];
if (bulkchunk == null) {
this.bodyChunk = new byte[ROW.objectsize()];
} else {
this.bodyChunk = bulkchunk;
}
} else {
// create empty chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize()];
// write content to chunks
if (bulkchunk != null) {
System.arraycopy(bulkchunk, offset, this.ohChunk, 0, overhead);
System.arraycopy(bulkchunk, offset + overhead, this.bodyChunk, 0, ROW.objectsize());
}
}
// mark chunks as changed
this.ohChanged = changed;
this.bodyChanged = changed;
}
public EcoNode(kelondroHandle handle) throws IOException {
// this creates an entry with an pre-reserved entry position.
// values can be written using the setValues() method,
// but we expect that values are already there in the file.
assert (handle != null): "node handle is null";
assert (handle.index >= 0): "node handle too low: " + handle.index;
//assert (handle.index < USAGE.allCount()) : "node handle too high: " + handle.index + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;
// the parentNode can be given if an auto-fix in the following case is wanted
if (handle == null) throw new kelondroException(filename, "INTERNAL ERROR: node handle is null.");
if (handle.index >= USAGE.allCount()) {
throw new kelondroException(filename, "INTERNAL ERROR, Node/init: node handle index " + handle.index + " exceeds size. No auto-fix node was submitted. This is a serious failure.");
}
// use given handle
this.handle = new kelondroHandle(handle.index);
// read complete record
byte[] bulkchunk = new byte[recordsize];
entryFile.readFully(seekpos(this.handle), bulkchunk, 0, recordsize);
if ((overhead == 0) && (bulkchunk.length == ROW.objectsize())) {
this.ohChunk = new byte[0];
this.bodyChunk = bulkchunk;
} else {
// create empty chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize()];
// write content to chunks
if (bulkchunk != null) {
System.arraycopy(bulkchunk, 0, this.ohChunk, 0, overhead);
System.arraycopy(bulkchunk, 0 + overhead, this.bodyChunk, 0, ROW.objectsize());
}
}
// mark chunks as not changed
this.ohChanged = false;
this.bodyChanged = false;
}
public kelondroHandle handle() {
// if this entry has an index, return it
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "the entry has no index assigned");
return this.handle;
}
public void setOHByte(int i, byte b) {
if (i >= OHBYTEC) throw new IllegalArgumentException("setOHByte: wrong index " + i);
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "setOHByte: no handle assigned");
this.ohChunk[i] = b;
this.ohChanged = true;
}
public void setOHHandle(int i, kelondroHandle otherhandle) {
assert (i < OHHANDLEC): "setOHHandle: wrong array size " + i;
assert (this.handle.index != kelondroHandle.NUL): "setOHHandle: no handle assigned ind file" + filename;
if (otherhandle == null) {
NUL2bytes(this.ohChunk, OHBYTEC + 4 * i);
} else {
if (otherhandle.index >= USAGE.allCount()) throw new kelondroException(filename, "INTERNAL ERROR, setOHHandles: handle " + i + " exceeds file size (" + handle.index + " >= " + USAGE.allCount() + ")");
int2bytes(otherhandle.index, this.ohChunk, OHBYTEC + 4 * i);
}
this.ohChanged = true;
}
public byte getOHByte(int i) {
if (i >= OHBYTEC) throw new IllegalArgumentException("getOHByte: wrong index " + i);
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
return this.ohChunk[i];
}
public kelondroHandle getOHHandle(int i) {
if (this.handle.index == kelondroHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
assert (i < OHHANDLEC): "handle index out of bounds: " + i + " in file " + filename;
int h = bytes2int(this.ohChunk, OHBYTEC + 4 * i);
return (h == kelondroHandle.NUL) ? null : new kelondroHandle(h);
}
public synchronized void setValueRow(byte[] row) throws IOException {
// if the index is defined, then write values directly to the file, else only to the object
if ((row != null) && (row.length != ROW.objectsize())) throw new IOException("setValueRow with wrong (" + row.length + ") row length instead correct: " + ROW.objectsize());
// set values
if (this.handle.index != kelondroHandle.NUL) {
this.bodyChunk = row;
this.bodyChanged = true;
}
}
public synchronized boolean valid() {
// returns true if the key starts with non-zero byte
// this may help to detect deleted entries
return (this.bodyChunk[0] != 0) && ((this.bodyChunk[0] != -128) || (this.bodyChunk[1] != 0));
}
public synchronized byte[] getKey() {
// read key
return trimCopy(this.bodyChunk, 0, ROW.width(0));
}
public synchronized byte[] getValueRow() throws IOException {
if (this.bodyChunk == null) {
// load all values from the database file
this.bodyChunk = new byte[ROW.objectsize()];
// read values
entryFile.readFully(seekpos(this.handle) + (long) overhead, this.bodyChunk, 0, this.bodyChunk.length);
}
return this.bodyChunk;
}
public synchronized void commit() throws IOException {
// this must be called after all write operations to the node are finished
// place the data to the file
boolean doCommit = this.ohChanged || this.bodyChanged;
// save head
synchronized (entryFile) {
if (this.ohChanged) {
//System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
assert (ohChunk == null) || (ohChunk.length == headchunksize);
entryFile.write(seekpos(this.handle), (this.ohChunk == null) ? new byte[overhead] : this.ohChunk);
this.ohChanged = false;
}
// save tail
if ((this.bodyChunk != null) && (this.bodyChanged)) {
//System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
assert (this.bodyChunk == null) || (this.bodyChunk.length == ROW.objectsize());
entryFile.write(seekpos(this.handle) + overhead, (this.bodyChunk == null) ? new byte[ROW.objectsize()] : this.bodyChunk);
this.bodyChanged = false;
}
if (doCommit) entryFile.commit();
}
}
}
}

@ -52,7 +52,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
public class kelondroFixedWidthArray extends kelondroRecords implements kelondroArray {
public class kelondroFixedWidthArray extends kelondroEcoRecords implements kelondroArray {
// define the Over-Head-Array
private static short thisOHBytes = 0; // our record definition does not need extra bytes
@ -60,10 +60,10 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
public kelondroFixedWidthArray(File file, kelondroRow rowdef, int intprops) throws IOException {
// this creates a new array
super(file, false, 0, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */);
super(file, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */);
if (!(super.fileExisted)) {
for (int i = 0; i < intprops; i++) {
setHandle(i, new Handle(NUL));
setHandle(i, new kelondroHandle(kelondroHandle.NUL));
}
// store column description
for (int i = 0; i < rowdef.columns(); i++) {
@ -74,9 +74,9 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
public kelondroFixedWidthArray(kelondroRA ra, String filename, kelondroRow rowdef, int intprops) throws IOException {
// this creates a new array
super(ra, filename, false, 0, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, false);
super(ra, filename, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, false);
for (int i = 0; i < intprops; i++) {
setHandle(i, new Handle(0));
setHandle(i, new kelondroHandle(0));
}
// store column description
for (int i = 0; i < rowdef.columns(); i++) {
@ -104,8 +104,8 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
// this writes a row without reading the row from the file system first
// create a node at position index with rowentry
Handle h = new Handle(index);
commit(newNode(h, (rowentry == null) ? null : rowentry.bytes(), 0), CP_NONE);
kelondroHandle h = new kelondroHandle(index);
(new EcoNode(h, (rowentry == null) ? null : rowentry.bytes(), 0)).commit();
// attention! this newNode call wants that the OH bytes are passed within the bulkchunk
// field. Here, only the rowentry.bytes() raw payload is passed. This is valid, because
// the OHbytes and OHhandles are zero.
@ -123,19 +123,19 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
}
public synchronized kelondroRow.Entry getIfValid(int index) throws IOException {
byte[] b = getNode(new Handle(index), true).getValueRow();
byte[] b = (new EcoNode(new kelondroHandle(index))).getValueRow();
if (b[0] == 0) return null;
if ((b[0] == -128) && (b[1] == 0)) return null;
return row().newEntry(b);
}
public synchronized kelondroRow.Entry get(int index) throws IOException {
return row().newEntry(getNode(new Handle(index), true).getValueRow());
return row().newEntry(new EcoNode(new kelondroHandle(index)).getValueRow());
}
protected synchronized int seti(int index, int value) throws IOException {
int before = getHandle(index).hashCode();
setHandle(index, new Handle(value));
setHandle(index, new kelondroHandle(value));
return before;
}
@ -145,8 +145,8 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
public synchronized int add(kelondroRow.Entry rowentry) throws IOException {
// adds a new rowentry, but re-uses a previously as-deleted marked entry
Node n = newNode(rowentry.bytes());
commit(n, CP_NONE);
kelondroNode n = new EcoNode(rowentry.bytes());
n.commit();
return n.handle().hashCode();
}
@ -154,12 +154,12 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
assert (index < (super.free() + super.size())) : "remove: index " + index + " out of bounds " + (super.free() + super.size());
// get the node at position index
Handle h = new Handle(index);
Node n = getNode(h, false);
kelondroHandle h = new kelondroHandle(index);
kelondroNode n = new EcoNode(h);
// erase the row
n.setValueRow(null);
commit(n, CP_NONE);
n.commit();
// mark row as deleted so it can be re-used
deleteNode(h);
@ -211,7 +211,7 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
k.remove(1);
k.print();
k.print(true);
k.print();
k.close();
@ -227,7 +227,7 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
}
}
k.print();
k.print(true);
k.print();
k.close();
} catch (IOException e) {

@ -147,7 +147,7 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
public synchronized boolean has(byte[] key) throws IOException {
// it is not recommended to implement or use a has predicate unless
// it can be ensured that it causes no IO
if ((kelondroRecords.debugmode) && (RAMIndex != true)) serverLog.logWarning("kelondroFlexTable", "RAM index warning in file " + super.tablename);
if ((kelondroAbstractRecords.debugmode) && (RAMIndex != true)) serverLog.logWarning("kelondroFlexTable", "RAM index warning in file " + super.tablename);
assert this.size() == index.size() : "content.size() = " + this.size() + ", index.size() = " + index.size();
return index.geti(key) >= 0;
}
@ -157,11 +157,11 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
if (space < 0) throw new kelondroException("wrong space: " + space);
kelondroBytesIntMap ri = new kelondroBytesIntMap(super.row().column(0).cellwidth(), super.rowdef.objectOrder, space);
Iterator content = super.col[0].contentNodes(-1);
kelondroRecords.Node node;
kelondroNode node;
int i;
byte[] key;
while (content.hasNext()) {
node = (kelondroRecords.Node) content.next();
node = (kelondroNode) content.next();
i = node.handle().hashCode();
key = node.getKey();
assert (key != null) : "DEBUG: empty key in initializeRamIndex"; // should not happen; if it does, it is an error of the condentNodes iterator
@ -182,13 +182,13 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
private kelondroIndex initializeTreeIndex(File indexfile, long preloadTime, kelondroOrder objectOrder, int primaryKey) throws IOException {
kelondroIndex treeindex = new kelondroCache(new kelondroTree(indexfile, true, preloadTime, treeIndexRow(rowdef.width(0), objectOrder), 2, 80), true, false);
Iterator content = super.col[0].contentNodes(-1);
kelondroRecords.Node node;
kelondroNode node;
kelondroRow.Entry indexentry;
int i, c = 0, all = super.col[0].size();
long start = System.currentTimeMillis();
long last = start;
while (content.hasNext()) {
node = (kelondroRecords.Node) content.next();
node = (kelondroNode) content.next();
i = node.handle().hashCode();
indexentry = treeindex.row().newEntry();
indexentry.setCol(0, node.getValueRow());

@ -162,7 +162,7 @@ public class kelondroFlexWidthArray implements kelondroArray {
// open existing files
File file = new File(tabledir, "col.000.list");
return kelondroRecords.staticsize(file);
return kelondroCachedRecords.staticsize(file);
}
public static void delete(File path, String tablename) {
@ -368,8 +368,8 @@ public class kelondroFlexWidthArray implements kelondroArray {
k.remove(1);
k.print();
k.col[0].print(true);
k.col[1].print(true);
k.col[0].print();
k.col[1].print();
k.close();
@ -391,7 +391,7 @@ public class kelondroFlexWidthArray implements kelondroArray {
}
k = new kelondroFlexWidthArray(f, "flextest", rowdef, true);
k.print();
k.col[0].print(true);
k.col[0].print();
k.close();
} catch (IOException e) {

@ -0,0 +1,86 @@
// kelondroHandle.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro;
public class kelondroHandle implements Comparable {
public final static int NUL = Integer.MIN_VALUE; // the meta value for the kelondroTray' NUL abstraction
protected int index;
protected kelondroHandle(int i) {
assert (i == NUL) || (i >= 0) : "node handle index too low: " + i;
//assert (i == NUL) || (i < USAGE.allCount()) : "node handle index too high: " + i + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;
this.index = i;
//if ((USAGE != null) && (this.index != NUL)) USAGE.allocate(this.index);
}
public boolean isNUL() {
return index == NUL;
}
public String toString() {
if (index == NUL) return "NULL";
String s = Integer.toHexString(index);
while (s.length() < 4) s = "0" + s;
return s;
}
public boolean equals(kelondroHandle h) {
assert (index != NUL);
assert (h.index != NUL);
return (this.index == h.index);
}
public boolean equals(Object h) {
assert (index != NUL);
assert (((kelondroHandle) h).index != NUL);
return (this.index == ((kelondroHandle) h).index);
}
public int compare(Object h0, Object h1) {
assert (((kelondroHandle) h0).index != NUL);
assert (((kelondroHandle) h1).index != NUL);
if (((kelondroHandle) h0).index < ((kelondroHandle) h1).index) return -1;
if (((kelondroHandle) h0).index > ((kelondroHandle) h1).index) return 1;
return 0;
}
public int compareTo(Object h) {
// this is needed for a TreeMap
assert (index != NUL) : "this.index is NUL in compareTo";
assert (((kelondroHandle) h).index != NUL) : "handle.index is NUL in compareTo";
if (index < ((kelondroHandle) h).index) return -1;
if (index > ((kelondroHandle) h).index) return 1;
return 0;
}
public int hashCode() {
assert (index != NUL);
return this.index;
}
}

@ -0,0 +1,45 @@
// kelondroNode.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro;
import java.io.IOException;
public interface kelondroNode {

    /** Returns the handle (record position index) of this node. */
    public kelondroHandle handle();

    /** Sets overhead byte number i to the value b. */
    public void setOHByte(int i, byte b);

    /** Sets overhead handle number i; implementations accept null for "no handle". */
    public void setOHHandle(int i, kelondroHandle otherhandle);

    /** Returns overhead byte number i. */
    public byte getOHByte(int i);

    /** Returns overhead handle number i; may be null when no handle is stored. */
    public kelondroHandle getOHHandle(int i);

    /**
     * Writes all pending changes of this node to the backing store.
     * Must be called after all write operations to the node are finished.
     */
    public void commit() throws IOException;

    /** Replaces the row payload of this node (written on commit). */
    public void setValueRow(byte[] row) throws IOException;

    /** Returns true if this node holds a valid (non-deleted) entry. */
    public boolean valid();

    /** Returns the key of this node, i.e. the first column of the row. */
    public byte[] getKey();

    /** Returns the full row payload of this node. */
    public byte[] getValueRow() throws IOException;

    /** Returns a human-readable representation of the node. */
    public String toString();
}

File diff suppressed because it is too large Load Diff

@ -53,7 +53,7 @@ import java.io.RandomAccessFile;
import java.util.Iterator;
import java.util.StringTokenizer;
public final class kelondroStack extends kelondroRecords {
public final class kelondroStack extends kelondroEcoRecords {
// define the Over-Head-Array
private static short thisOHBytes = 0; // our record definition does not need extra bytes
@ -68,7 +68,7 @@ public final class kelondroStack extends kelondroRecords {
public kelondroStack(File file, kelondroRow rowdef) throws IOException {
// this creates a new stack
super(file, false, 0, thisOHBytes, thisOHHandles, rowdef, thisFHandles, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */);
super(file, thisOHBytes, thisOHHandles, rowdef, thisFHandles, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */);
if (super.fileExisted) {
//if ((getHandle(root) == null) && (getHandle(toor) == null)) clear();
} else {
@ -113,8 +113,8 @@ public final class kelondroStack extends kelondroRecords {
}
public class stackIterator implements Iterator {
Handle nextHandle = null;
Handle lastHandle = null;
kelondroHandle nextHandle = null;
kelondroHandle lastHandle = null;
boolean up;
public stackIterator(boolean up) {
@ -129,8 +129,8 @@ public final class kelondroStack extends kelondroRecords {
public Object next() {
lastHandle = nextHandle;
try {
nextHandle = getNode(nextHandle, null, 0, false).getOHHandle((up) ? right : left);
return row().newEntry(getNode(lastHandle, null, 0, true).getValueRow());
nextHandle = new EcoNode(nextHandle).getOHHandle((up) ? right : left);
return row().newEntry(new EcoNode(lastHandle).getValueRow());
} catch (IOException e) {
throw new kelondroException(filename, "IO error at Counter:next()");
}
@ -138,7 +138,7 @@ public final class kelondroStack extends kelondroRecords {
public void remove() {
try {
unlinkNode(getNode(lastHandle, false));
unlinkNode(new EcoNode(lastHandle));
} catch (IOException e) {
e.printStackTrace();
}
@ -154,23 +154,23 @@ public final class kelondroStack extends kelondroRecords {
if (getHandle(toor) == null) {
if (getHandle(root) != null) throw new RuntimeException("push: internal organisation of root and toor");
// create node
Node n = newNode(row.bytes());
kelondroNode n = new EcoNode(row.bytes());
n.setOHHandle(left, null);
n.setOHHandle(right, null);
commit(n, CP_NONE);
n.commit();
// assign handles
setHandle(root, n.handle());
setHandle(toor, n.handle());
// thats it
} else {
// expand the list at the end
Node n = newNode(row.bytes());
kelondroNode n = new EcoNode(row.bytes());
n.setOHHandle(left, getHandle(toor));
n.setOHHandle(right, null);
Node n1 = getNode(getHandle(toor), null, 0, false);
kelondroNode n1 = new EcoNode(getHandle(toor));
n1.setOHHandle(right, n.handle());
commit(n, CP_NONE);
commit(n1, CP_NONE);
n.commit();
n1.commit();
// assign handles
setHandle(toor, n.handle());
// thats it
@ -179,7 +179,7 @@ public final class kelondroStack extends kelondroRecords {
public synchronized kelondroRow.Entry pop() throws IOException {
// return row ontop of the stack and shrink stack by one
Node n = topNode();
kelondroNode n = topNode();
if (n == null) return null;
kelondroRow.Entry ret = row().newEntry(n.getValueRow());
@ -192,14 +192,14 @@ public final class kelondroStack extends kelondroRecords {
public synchronized kelondroRow.Entry top() throws IOException {
// return row ontop of the stack
Node n = topNode();
kelondroNode n = topNode();
if (n == null) return null;
return row().newEntry(n.getValueRow());
}
public synchronized kelondroRow.Entry pot() throws IOException {
// return row on the bottom of the stack and remove record
Node n = botNode();
kelondroNode n = botNode();
if (n == null) return null;
kelondroRow.Entry ret = row().newEntry(n.getValueRow());
@ -212,25 +212,25 @@ public final class kelondroStack extends kelondroRecords {
public synchronized kelondroRow.Entry bot() throws IOException {
// return row on the bottom of the stack
Node n = botNode();
kelondroNode n = botNode();
if (n == null) return null;
return row().newEntry(n.getValueRow());
}
private void unlinkNode(Node n) throws IOException {
private void unlinkNode(kelondroNode n) throws IOException {
// join chaines over node
Handle l = n.getOHHandle(left);
Handle r = n.getOHHandle(right);
kelondroHandle l = n.getOHHandle(left);
kelondroHandle r = n.getOHHandle(right);
// look left
if (l == null) {
// reached the root on left side
setHandle(root, r);
} else {
// un-link the previous record
Node k = getNode(l, null, 0, false);
kelondroNode k = new EcoNode(l);
k.setOHHandle(left, k.getOHHandle(left));
k.setOHHandle(right, r);
commit(k, CP_NONE);
k.commit();
}
// look right
if (r == null) {
@ -238,27 +238,27 @@ public final class kelondroStack extends kelondroRecords {
setHandle(toor, l);
} else {
// un-link the following record
Node k = getNode(r, null, 0, false);
kelondroNode k = new EcoNode(r);
k.setOHHandle(left, l);
k.setOHHandle(right, k.getOHHandle(right));
commit(k, CP_NONE);
k.commit();
}
}
private Node topNode() throws IOException {
private kelondroNode topNode() throws IOException {
// return node ontop of the stack
if (size() == 0) return null;
Handle h = getHandle(toor);
kelondroHandle h = getHandle(toor);
if (h == null) return null;
return getNode(h, true);
return new EcoNode(h);
}
private Node botNode() throws IOException {
private kelondroNode botNode() throws IOException {
// return node on bottom of the stack
if (size() == 0) return null;
Handle h = getHandle(root);
kelondroHandle h = getHandle(root);
if (h == null) return null;
return getNode(h, true);
return new EcoNode(h);
}
public int imp(File file, String separator) throws IOException {
@ -297,7 +297,7 @@ public final class kelondroStack extends kelondroRecords {
}
}
public String hp(Handle h) {
public String hp(kelondroHandle h) {
if (h == null)
return "NULL";
else
@ -305,7 +305,7 @@ public final class kelondroStack extends kelondroRecords {
}
public void print() throws IOException {
super.print(false);
super.print();
Iterator it = stackIterator(true);
kelondroRow.Entry r;
while (it.hasNext()) {

@ -65,7 +65,7 @@ import java.util.logging.Logger;
import de.anomic.kelondro.kelondroRow.Entry;
import de.anomic.server.logging.serverLog;
public class kelondroTree extends kelondroRecords implements kelondroIndex {
public class kelondroTree extends kelondroCachedRecords implements kelondroIndex {
// logging (This probably needs someone to initialize the java.util.logging.* facilities);
public static final Logger log = Logger.getLogger("KELONDRO");
@ -161,13 +161,10 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
setHandle(root, null);
}
private void commitNode(Node n) throws IOException {
Handle left = n.getOHHandle(leftchild);
Handle right = n.getOHHandle(rightchild);
if ((left == null) && (right == null)) commit(n, CP_LOW);
else if (left == null) commit(n, CP_MEDIUM);
else if (right == null) commit(n, CP_MEDIUM);
else commit(n, CP_HIGH);
private void commitNode(kelondroNode n) throws IOException {
//kelondroHandle left = n.getOHHandle(leftchild);
//kelondroHandle right = n.getOHHandle(rightchild);
n.commit();
}
public boolean has(byte[] key) throws IOException {
@ -196,12 +193,12 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// can be returned, together with the information wether the new key shall
// be left or right child.
private Node thenode, parentnode;
private CacheNode thenode, parentnode;
private boolean found; // property if node was found
private byte child; // -1: left child; 0: root node; 1: right child
// temporary variables
private Handle thisHandle;
private kelondroHandle thisHandle;
byte[] keybuffer;
protected Search() {
@ -237,7 +234,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
while (thisHandle != null) {
try {
parentnode = thenode;
thenode = getNode(thisHandle, thenode, (child == -1) ? leftchild : rightchild, true);
thenode = new CacheNode(thisHandle, thenode, (child == -1) ? leftchild : rightchild, true);
} catch (IllegalArgumentException e) {
logWarning("kelondroTree.Search.process: fixed a broken handle");
found = false;
@ -269,7 +266,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
thenode.setOHHandle(parent, null);
thenode.setOHHandle(leftchild, null);
thenode.setOHHandle(rightchild, null);
commit(thenode, CP_NONE);
thenode.commit();
logWarning("kelondroTree.Search.process: database contains loops; the loop-nodes have been auto-fixed");
found = false;
return;
@ -300,12 +297,12 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
return found;
}
public Node getMatcher() {
public CacheNode getMatcher() {
if (found) return thenode;
throw new IllegalArgumentException("wrong access of matcher");
}
public Node getParent() {
public CacheNode getParent() {
if (found) return parentnode;
return thenode;
}
@ -326,9 +323,9 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
}
}
public synchronized boolean isChild(Node childn, Node parentn, int child) {
public synchronized boolean isChild(kelondroNode childn, kelondroNode parentn, int child) {
if (childn == null) throw new IllegalArgumentException("isLeftChild: Node parameter is NULL");
Handle lc = parentn.getOHHandle(child);
kelondroHandle lc = parentn.getOHHandle(child);
if (lc == null) return false;
return (lc.equals(childn.handle()));
}
@ -355,8 +352,9 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
writeSearchObj.process(newrow.getColBytes(0));
if (writeSearchObj.found()) {
// a node with this key exist. simply overwrite the content and return old content
Node e = writeSearchObj.getMatcher();
result = row().newEntry(e.setValueRow(newrow.bytes()));
kelondroNode e = writeSearchObj.getMatcher();
result = row().newEntry(e.getValueRow());
e.setValueRow(newrow.bytes());
commitNode(e);
} else if (writeSearchObj.isRoot()) {
// a node with this key does not exist and there is no node at all
@ -364,7 +362,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
if (getHandle(root) != null)
throw new kelondroException(filename, "tried to create root node twice");
// we dont have any Nodes in the file, so start here to create one
Node e = newNode(newrow.bytes());
kelondroNode e = new CacheNode(newrow.bytes());
// write the propetries
e.setOHByte(magic, (byte) 1);
e.setOHByte(balance, (byte) 0);
@ -372,7 +370,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
e.setOHHandle(leftchild, null);
e.setOHHandle(rightchild, null);
// do updates
commit(e, CP_LOW);
e.commit();
setHandle(root, e.handle());
result = null;
} else {
@ -384,22 +382,22 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// that side, but not if the assigned position is appropriate.
// create new node and assign values
Node parentNode = writeSearchObj.getParent();
Node theNode = newNode(newrow.bytes());
CacheNode parentNode = writeSearchObj.getParent();
CacheNode theNode = new CacheNode(newrow.bytes());
theNode.setOHByte(0, (byte) 1); // fresh magic
theNode.setOHByte(1, (byte) 0); // fresh balance
theNode.setOHHandle(parent, parentNode.handle());
theNode.setOHHandle(leftchild, null);
theNode.setOHHandle(rightchild, null);
commit(theNode, CP_LOW);
theNode.commit();
// check consistency and link new node to parent node
byte parentBalance;
if (writeSearchObj.isLeft()) {
if (parentNode.getOHHandle(leftchild) != null) throw new kelondroException(filename, "tried to create leftchild node twice. parent=" + new String(parentNode.getKey()) + ", leftchild=" + new String(getNode(parentNode.getOHHandle(leftchild), (Node) null, 0, true).getKey()));
if (parentNode.getOHHandle(leftchild) != null) throw new kelondroException(filename, "tried to create leftchild node twice. parent=" + new String(parentNode.getKey()) + ", leftchild=" + new String(new CacheNode(parentNode.getOHHandle(leftchild), (CacheNode) null, 0, true).getKey()));
parentNode.setOHHandle(leftchild, theNode.handle());
} else if (writeSearchObj.isRight()) {
if (parentNode.getOHHandle(rightchild) != null) throw new kelondroException(filename, "tried to create rightchild node twice. parent=" + new String(parentNode.getKey()) + ", rightchild=" + new String(getNode(parentNode.getOHHandle(rightchild), (Node) null, 0, true).getKey()));
if (parentNode.getOHHandle(rightchild) != null) throw new kelondroException(filename, "tried to create rightchild node twice. parent=" + new String(parentNode.getKey()) + ", rightchild=" + new String(new CacheNode(parentNode.getOHHandle(rightchild), (CacheNode) null, 0, true).getKey()));
parentNode.setOHHandle(rightchild, theNode.handle());
} else {
throw new kelondroException(filename, "neither left nor right child");
@ -415,7 +413,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
boolean increasedHight = true;
String path = "";
byte prevHight;
Handle parentSideHandle;
kelondroHandle parentSideHandle;
while (increasedHight) {
// update balance
@ -455,17 +453,17 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
break;
}
if (path.startsWith("RL")) {
Handle parentHandle = parentNode.handle();
LL_RightRotation(theNode, getNode(theNode.getOHHandle(leftchild), theNode, leftchild, false));
parentNode = getNode(parentHandle, null, 0, false); // reload the parent node
RR_LeftRotation(parentNode, getNode(parentNode.getOHHandle(rightchild), parentNode, rightchild, false));
kelondroHandle parentHandle = parentNode.handle();
LL_RightRotation(theNode, new CacheNode(theNode.getOHHandle(leftchild), theNode, leftchild, false));
parentNode = new CacheNode(parentHandle, null, 0, false); // reload the parent node
RR_LeftRotation(parentNode, new CacheNode(parentNode.getOHHandle(rightchild), parentNode, rightchild, false));
break;
}
if (path.startsWith("LR")) {
Handle parentHandle = parentNode.handle();
RR_LeftRotation(theNode, getNode(theNode.getOHHandle(rightchild), theNode, rightchild, false));
parentNode = getNode(parentHandle, null, 0, false); // reload the parent node
LL_RightRotation(parentNode, getNode(parentNode.getOHHandle(leftchild), parentNode, leftchild, false));
kelondroHandle parentHandle = parentNode.handle();
RR_LeftRotation(theNode, new CacheNode(theNode.getOHHandle(rightchild), theNode, rightchild, false));
parentNode = new CacheNode(parentHandle, null, 0, false); // reload the parent node
LL_RightRotation(parentNode, new CacheNode(parentNode.getOHHandle(leftchild), parentNode, leftchild, false));
break;
}
break;
@ -473,7 +471,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// crawl up the tree
if (parentNode.getOHHandle(parent) == null) break; // root reached: stop
theNode = parentNode;
parentNode = getNode(parentNode.getOHHandle(parent), null, 0, false);
parentNode = new CacheNode(parentNode.getOHHandle(parent), null, 0, false);
}
result = null; // that means: no previous stored value present
@ -496,14 +494,14 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
while (i.hasNext()) addUnique((kelondroRow.Entry) i.next());
}
private void assignChild(Node parentNode, Node childNode, int childType) throws IOException {
private void assignChild(kelondroNode parentNode, kelondroNode childNode, int childType) throws IOException {
parentNode.setOHHandle(childType, childNode.handle());
childNode.setOHHandle(parent, parentNode.handle());
commitNode(parentNode);
commitNode(childNode);
}
private void replace(Node oldNode, Node oldNodeParent, Node newNode) throws IOException {
private void replace(kelondroNode oldNode, kelondroNode oldNodeParent, kelondroNode newNode) throws IOException {
// this routine looks where the oldNode is connected to, and replaces
// the anchor's link to the oldNode by the newNode-link
// the new link gets the anchor as parent link assigned
@ -519,20 +517,20 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// not the root, find parent
// ok, we have the parent, but for updating the child link we must know
// if the oldNode was left or right child
Handle parentSideHandle = oldNodeParent.getOHHandle(leftchild);
if ((parentSideHandle != null) && (parentSideHandle.equals(oldNode.handle()))) {
// update left node from parent
oldNodeParent.setOHHandle(leftchild, newNode.handle());
kelondroHandle parentSideHandle = oldNodeParent.getOHHandle(leftchild);
if ((parentSideHandle != null) && (parentSideHandle.equals(oldNode.handle()))) {
// update left node from parent
oldNodeParent.setOHHandle(leftchild, newNode.handle());
}
parentSideHandle = oldNodeParent.getOHHandle(rightchild);
if ((parentSideHandle != null) && (parentSideHandle.equals(oldNode.handle()))) {
// update right node from parent
oldNodeParent.setOHHandle(rightchild, newNode.handle());
// update right node from parent
oldNodeParent.setOHHandle(rightchild, newNode.handle());
}
commitNode(oldNodeParent);
// update new Node
newNode.setOHHandle(parent, oldNodeParent.handle());
commitNode(newNode);
commitNode(newNode);
}
// finished. remember that we did not set the links to the oldNode
// we have also not set the children of the newNode.
@ -550,18 +548,18 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
return 0;
}
private void LL_RightRotation(Node parentNode, Node childNode) throws IOException {
private void LL_RightRotation(kelondroNode parentNode, CacheNode childNode) throws IOException {
// replace the parent node; the parent is afterwards unlinked
Handle p2Handle = parentNode.getOHHandle(parent);
Node p2Node = (p2Handle == null) ? null : getNode(p2Handle, null, 0, false);
kelondroHandle p2Handle = parentNode.getOHHandle(parent);
kelondroNode p2Node = (p2Handle == null) ? null : new CacheNode(p2Handle, null, 0, false);
replace(parentNode, p2Node, childNode);
// set the left son of the parent to the right son of the childNode
Handle childOfChild = childNode.getOHHandle(rightchild);
kelondroHandle childOfChild = childNode.getOHHandle(rightchild);
if (childOfChild == null) {
parentNode.setOHHandle(leftchild, null);
} else {
assignChild(parentNode, getNode(childOfChild, childNode, rightchild, false), leftchild);
assignChild(parentNode, new CacheNode(childOfChild, childNode, rightchild, false), leftchild);
}
// link the old parent node as the right child of childNode
@ -581,18 +579,18 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
commitNode(childNode);
}
private void RR_LeftRotation(Node parentNode, Node childNode) throws IOException {
private void RR_LeftRotation(kelondroNode parentNode, CacheNode childNode) throws IOException {
// replace the parent node; the parent is afterwards unlinked
Handle p2Handle = parentNode.getOHHandle(parent);
Node p2Node = (p2Handle == null) ? null : getNode(p2Handle, null, 0, false);
kelondroHandle p2Handle = parentNode.getOHHandle(parent);
kelondroNode p2Node = (p2Handle == null) ? null : new CacheNode(p2Handle, null, 0, false);
replace(parentNode, p2Node, childNode);
// set the left son of the parent to the right son of the childNode
Handle childOfChild = childNode.getOHHandle(leftchild);
kelondroHandle childOfChild = childNode.getOHHandle(leftchild);
if (childOfChild == null) {
parentNode.setOHHandle(rightchild, null);
} else {
assignChild(parentNode, getNode(childOfChild, childNode, leftchild, false), rightchild);
assignChild(parentNode, new CacheNode(childOfChild, childNode, leftchild, false), rightchild);
}
// link the old parent node as the left child of childNode
@ -625,7 +623,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
synchronized(writeSearchObj) {
writeSearchObj.process(key);
if (writeSearchObj.found()) {
Node result = writeSearchObj.getMatcher();
CacheNode result = writeSearchObj.getMatcher();
kelondroRow.Entry values = row().newEntry(result.getValueRow());
remove(result, writeSearchObj.getParent());
return values;
@ -638,7 +636,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
public kelondroRow.Entry removeOne() throws IOException {
// removes just any entry and removes that entry
synchronized(writeSearchObj) {
Node theOne = lastNode();
CacheNode theOne = lastNode();
kelondroRow.Entry values = row().newEntry(theOne.getValueRow());
remove(theOne, null);
return values;
@ -649,44 +647,44 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
while (size() > 0) remove(lastNode(), null);
}
private void remove(Node node, Node parentOfNode) throws IOException {
// there are three cases when removing a node
// - the node is a leaf - it can be removed easily
// - the node has one child - the child replaces the node
// - the node has two childs - it can be replaced either
// by the greatest node of the left child or the smallest
// node of the right child
private void remove(CacheNode node, kelondroNode parentOfNode) throws IOException {
// there are three cases when removing a node
// - the node is a leaf - it can be removed easily
// - the node has one child - the child replaces the node
// - the node has two childs - it can be replaced either
// by the greatest node of the left child or the smallest
// node of the right child
Node childnode;
if ((node.getOHHandle(leftchild) == null) && (node.getOHHandle(rightchild) == null)) {
kelondroNode childnode;
if ((node.getOHHandle(leftchild) == null) && (node.getOHHandle(rightchild) == null)) {
// easy case: the node is a leaf
if (parentOfNode == null) {
// this is the root!
setHandle(root, null);
} else {
Handle h = parentOfNode.getOHHandle(leftchild);
if ((h != null) && (h.equals(node.handle()))) parentOfNode.setOHHandle(leftchild, null);
h = parentOfNode.getOHHandle(rightchild);
if ((h != null) && (h.equals(node.handle()))) parentOfNode.setOHHandle(rightchild, null);
commitNode(parentOfNode);
kelondroHandle h = parentOfNode.getOHHandle(leftchild);
if ((h != null) && (h.equals(node.handle()))) parentOfNode.setOHHandle(leftchild, null);
h = parentOfNode.getOHHandle(rightchild);
if ((h != null) && (h.equals(node.handle()))) parentOfNode.setOHHandle(rightchild, null);
commitNode(parentOfNode);
}
} else if ((node.getOHHandle(leftchild) != null) && (node.getOHHandle(rightchild) == null)) {
replace(node, parentOfNode, getNode(node.getOHHandle(leftchild), node, leftchild, false));
replace(node, parentOfNode, new CacheNode(node.getOHHandle(leftchild), node, leftchild, false));
} else if ((node.getOHHandle(leftchild) == null) && (node.getOHHandle(rightchild) != null)) {
replace(node, parentOfNode, getNode(node.getOHHandle(rightchild), node, rightchild, false));
replace(node, parentOfNode, new CacheNode(node.getOHHandle(rightchild), node, rightchild, false));
} else {
// difficult case: node has two children
Node repl = lastNode(getNode(node.getOHHandle(leftchild), node, leftchild, false));
CacheNode repl = lastNode(new CacheNode(node.getOHHandle(leftchild), node, leftchild, false));
//System.out.println("last node is " + repl.toString());
// we remove that replacement node and put it where the node was
// this seems to be recursive, but is not since the replacement
// node cannot have two children (it would not have been the smallest or greatest)
Node n;
Handle h;
kelondroNode n;
kelondroHandle h;
// remove leaf
if ((repl.getOHHandle(leftchild) == null) && (repl.getOHHandle(rightchild) == null)) {
// the replacement cannot be the root, so simply remove from parent node
n = getNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
n = new CacheNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
h = n.getOHHandle(leftchild);
if ((h != null) && (h.equals(repl.handle()))) n.setOHHandle(leftchild, null);
h = n.getOHHandle(rightchild);
@ -694,11 +692,11 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
commitNode(n);
} else if ((repl.getOHHandle(leftchild) != null) && (repl.getOHHandle(rightchild) == null)) {
try {
childnode = getNode(repl.getOHHandle(leftchild), repl, leftchild, false);
replace(repl, getNode(repl.getOHHandle(parent), null, 0, false), childnode);
childnode = new CacheNode(repl.getOHHandle(leftchild), repl, leftchild, false);
replace(repl, new CacheNode(repl.getOHHandle(parent), null, 0, false), childnode);
} catch (IllegalArgumentException e) {
// now treat the situation as if that link had been null before
n = getNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
n = new CacheNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
h = n.getOHHandle(leftchild);
if ((h != null) && (h.equals(repl.handle()))) n.setOHHandle(leftchild, null);
h = n.getOHHandle(rightchild);
@ -707,11 +705,11 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
}
} else if ((repl.getOHHandle(leftchild) == null) && (repl.getOHHandle(rightchild) != null)) {
try {
childnode = getNode(repl.getOHHandle(rightchild), repl, rightchild, false);
replace(repl, getNode(repl.getOHHandle(parent), null, 0, false), childnode);
childnode = new CacheNode(repl.getOHHandle(rightchild), repl, rightchild, false);
replace(repl, new CacheNode(repl.getOHHandle(parent), null, 0, false), childnode);
} catch (IllegalArgumentException e) {
// now treat the situation as if that link had been null before
n = getNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
n = new CacheNode(repl.getOHHandle(parent), null, 0, false); // parent node of replacement node
h = n.getOHHandle(leftchild);
if ((h != null) && (h.equals(repl.handle()))) n.setOHHandle(leftchild, null);
h = n.getOHHandle(rightchild);
@ -720,30 +718,30 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
}
}
//System.out.println("node before reload is " + node.toString());
node = getNode(node.handle(), null, 0, false); // reload the node, it is possible that it has been changed
node = new CacheNode(node.handle(), null, 0, false); // reload the node, it is possible that it has been changed
//System.out.println("node after reload is " + node.toString());
// now plant in the replacement node
byte b = node.getOHByte(balance); // save balance of disappearing node
Handle parentHandle = node.getOHHandle(parent);
Handle leftchildHandle = node.getOHHandle(leftchild);
Handle rightchildHandle = node.getOHHandle(rightchild);
kelondroHandle parentHandle = node.getOHHandle(parent);
kelondroHandle leftchildHandle = node.getOHHandle(leftchild);
kelondroHandle rightchildHandle = node.getOHHandle(rightchild);
replace(node, parentOfNode, repl);
repl.setOHByte(balance, b); // restore balance
repl.setOHHandle(parent, parentHandle); // restore handles
repl.setOHHandle(leftchild, leftchildHandle);
repl.setOHHandle(rightchild, rightchildHandle);
commitNode(repl);
repl.setOHHandle(leftchild, leftchildHandle);
repl.setOHHandle(rightchild, rightchildHandle);
commitNode(repl);
// last thing to do: change uplinks of children to this new node
if (leftchildHandle != null) {
n = getNode(leftchildHandle, node, leftchild, false);
n.setOHHandle(parent, repl.handle());
commitNode(n);
n = new CacheNode(leftchildHandle, node, leftchild, false);
n.setOHHandle(parent, repl.handle());
commitNode(n);
}
if (rightchildHandle != null) {
n = getNode(rightchildHandle, node, rightchild, false);
n.setOHHandle(parent, repl.handle());
commitNode(n);
n = new CacheNode(rightchildHandle, node, rightchild, false);
n.setOHHandle(parent, repl.handle());
commitNode(n);
}
}
// move node to recycling queue
@ -752,20 +750,20 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
}
}
protected Node firstNode() throws IOException {
Handle h = getHandle(root);
protected CacheNode firstNode() throws IOException {
kelondroHandle h = getHandle(root);
if (h == null) return null;
return firstNode(getNode(h, null, 0, true));
return firstNode(new CacheNode(h, null, 0, true));
}
protected Node firstNode(Node node) throws IOException {
protected CacheNode firstNode(CacheNode node) throws IOException {
if (node == null) throw new IllegalArgumentException("firstNode: node=null");
Handle h = node.getOHHandle(leftchild);
kelondroHandle h = node.getOHHandle(leftchild);
HashSet visitedNodeKeys = new HashSet(); // to detect loops
String nodeKey;
while (h != null) {
try {
node = getNode(h, node, leftchild, true);
node = new CacheNode(h, node, leftchild, true);
nodeKey = new String(node.getKey());
if (visitedNodeKeys.contains(nodeKey)) throw new kelondroException(this.filename, "firstNode: database contains loops: '" + nodeKey + "' appears twice.");
visitedNodeKeys.add(nodeKey);
@ -778,20 +776,20 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
return node;
}
protected Node lastNode() throws IOException {
Handle h = getHandle(root);
protected CacheNode lastNode() throws IOException {
kelondroHandle h = getHandle(root);
if (h == null) return null;
return lastNode(getNode(h, null, 0, true));
return lastNode(new CacheNode(h, null, 0, true));
}
protected Node lastNode(Node node) throws IOException {
protected CacheNode lastNode(CacheNode node) throws IOException {
if (node == null) throw new IllegalArgumentException("lastNode: node=null");
Handle h = node.getOHHandle(rightchild);
kelondroHandle h = node.getOHHandle(rightchild);
HashSet visitedNodeKeys = new HashSet(); // to detect loops
String nodeKey;
while (h != null) {
try {
node = getNode(h, node, rightchild, true);
node = new CacheNode(h, node, rightchild, true);
nodeKey = new String(node.getKey());
if (visitedNodeKeys.contains(nodeKey)) throw new kelondroException(this.filename, "lastNode: database contains loops: '" + nodeKey + "' appears twice.");
visitedNodeKeys.add(nodeKey);
@ -807,7 +805,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
private class nodeIterator implements Iterator {
// we implement an iteration! (not a recursive function as the structure would suggest...)
// the iterator iterates Node objects
Node nextNode = null;
CacheNode nextNode = null;
boolean up, rot;
LinkedList nodeStack;
int count;
@ -831,7 +829,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
if (search.found()) {
init(search.getMatcher());
} else {
Node nn = search.getParent();
CacheNode nn = search.getParent();
if (nn == null) {
this.nextNode = null;
} else {
@ -872,16 +870,16 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
}
}
private void init(Node start) throws IOException {
private void init(CacheNode start) throws IOException {
this.nextNode = start;
// fill node stack for start node
nodeStack = new LinkedList();
Handle searchHandle = getHandle(root);
kelondroHandle searchHandle = getHandle(root);
if (searchHandle == null) {nextNode = null; return;}
Node searchNode = getNode(searchHandle, null, 0, false);
CacheNode searchNode = new CacheNode(searchHandle, null, 0, false);
byte[] startKey = start.getKey();
int c, ct;
while ((c = row().objectOrder.compare(startKey, searchNode.getKey())) != 0) {
@ -892,7 +890,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// go to next node
searchHandle = searchNode.getOHHandle(ct);
if (searchHandle == null) throw new kelondroException(filename, "nodeIterator.init: start node does not exist (handle null)");
searchNode = getNode(searchHandle, searchNode, ct, false);
searchNode = new CacheNode(searchHandle, searchNode, ct, false);
if (searchNode == null) throw new kelondroException(filename, "nodeIterator.init: start node does not exist (node null)");
}
// now every parent node to the start node is on the stack
@ -921,26 +919,26 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// middle-case
try {
int childtype = (up) ? rightchild : leftchild;
Handle childHandle = nextNode.getOHHandle(childtype);
kelondroHandle childHandle = nextNode.getOHHandle(childtype);
if (childHandle != null) {
//System.out.println("go to other leg, stack size=" + nodeStack.size());
// we have walked one leg of the tree; now go to the other one: step down to next child
HashSet visitedNodeHandles = new HashSet(); // to detect loops
nodeStack.addLast(new Object[]{nextNode, new Integer(childtype)});
nextNode = getNode(childHandle, nextNode, childtype, false);
nextNode = new CacheNode(childHandle, nextNode, childtype, false);
childtype = (up) ? leftchild : rightchild;
while ((childHandle = nextNode.getOHHandle(childtype)) != null) {
if (visitedNodeHandles.contains(childHandle)) {
// try to repair the nextNode
nextNode.setOHHandle(childtype, null);
commit(nextNode, CP_NONE);
nextNode.commit();
logWarning("nodeIterator.next: internal loopback; fixed loop and try to go on");
break;
}
visitedNodeHandles.add(childHandle);
try {
nodeStack.addLast(new Object[]{nextNode, new Integer(childtype)});
nextNode = getNode(childHandle, nextNode, childtype, false);
nextNode = new CacheNode(childHandle, nextNode, childtype, false);
} catch (IllegalArgumentException e) {
// return what we have
nodeStack.removeLast();
@ -958,13 +956,13 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
nextNode = null;
} else {
Object[] stacktop;
Node parentNode = null;
CacheNode parentNode = null;
int parentpointer = (up) ? rightchild : leftchild;
while ((nodeStack.size() != 0) && (parentpointer == ((up) ? rightchild : leftchild))) {
//System.out.println("step up");
// go on, walk up further
stacktop = (Object[]) nodeStack.removeLast(); // top of stack: Node/parentpointer pair
parentNode = (Node) stacktop[0];
parentNode = (CacheNode) stacktop[0];
parentpointer = ((Integer) stacktop[1]).intValue();
}
if ((nodeStack.size() == 0) && (parentpointer == ((up) ? rightchild : leftchild))) {
@ -992,12 +990,12 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
setOrder.direction(up);
setOrder.rotate(firstKey);
TreeMap rows = new TreeMap(setOrder);
Node n;
kelondroNode n;
String key;
synchronized (this) {
Iterator i = (firstKey == null) ? new nodeIterator(up, false) : new nodeIterator(up, false, firstKey, including);
while ((rows.size() < count) && (i.hasNext())) {
n = (Node) i.next();
n = (kelondroNode) i.next();
if (n == null) return rows;
key = new String(n.getKey());
if (rows.put(key, row().newEntry(n.getValueRow())) != null) return rows; // protection against loops
@ -1012,11 +1010,11 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
setOrder.direction(up);
setOrder.rotate(firstKey);
TreeSet set = new TreeSet(setOrder);
Node n;
kelondroNode n;
synchronized (this) {
Iterator i = (firstKey == null) ? new nodeIterator(up, rotating) : new nodeIterator(up, rotating, firstKey, including);
while ((set.size() < count) && (i.hasNext())) {
n = (Node) i.next();
n = (kelondroNode) i.next();
if ((n != null) && (n.getKey() != null)) set.add(new String(n.getKey()));
}
}
@ -1134,45 +1132,45 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
public synchronized int height() {
try {
Handle h = getHandle(root);
kelondroHandle h = getHandle(root);
if (h == null) return 0;
return height(getNode(h, null, 0, false));
return height(new CacheNode(h, null, 0, false));
} catch (IOException e) {
return 0;
}
}
private int height(Node node) throws IOException {
private int height(CacheNode node) throws IOException {
if (node == null) return 0;
Handle h = node.getOHHandle(leftchild);
int hl = (h == null) ? 0 : height(getNode(h, node, leftchild, false));
kelondroHandle h = node.getOHHandle(leftchild);
int hl = (h == null) ? 0 : height(new CacheNode(h, node, leftchild, false));
h = node.getOHHandle(rightchild);
int hr = (h == null) ? 0 : height(getNode(h, node, rightchild, false));
int hr = (h == null) ? 0 : height(new CacheNode(h, node, rightchild, false));
if (hl > hr) return hl + 1;
return hr + 1;
}
public void print() throws IOException {
super.print(false);
super.print();
int height = height();
System.out.println("HEIGHT = " + height);
Vector thisline = new Vector();
thisline.add(getHandle(root));
Vector nextline;
Handle handle;
Node node;
kelondroHandle handle;
kelondroNode node;
int linelength, width = (1 << (height - 1)) * (row().width(0) + 1);
String key;
for (int h = 1; h < height; h++) {
linelength = width / (thisline.size() * 2);
nextline = new Vector();
for (int i = 0; i < thisline.size(); i++) {
handle = (Handle) thisline.elementAt(i);
handle = (kelondroHandle) thisline.elementAt(i);
if (handle == null) {
node = null;
key = "[..]";
} else {
node = getNode(handle, null, 0, false);
node = new CacheNode(handle, null, 0, false);
if (node == null) key = "NULL"; else key = new String(node.getKey());
}
System.out.print(key);
@ -1202,12 +1200,12 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
if ((thisline != null) && (width >= 0)) {
linelength = width / thisline.size();
for (int i = 0; i < thisline.size(); i++) {
handle = (Handle) thisline.elementAt(i);
handle = (kelondroHandle) thisline.elementAt(i);
if (handle == null) {
node = null;
key = "NULL";
} else {
node = getNode(handle, null, 0, false);
node = new CacheNode(handle, null, 0, false);
if (node == null) key = "NULL"; else key = new String(node.getKey());
}
System.out.print(key);

@ -81,7 +81,7 @@ public class languageDataExtractor {
String code = "";
String file = "";
String input = "";
String line = "";
//String line = "";
String name = "";
String output = "";
String sKey = "";

@ -55,9 +55,9 @@ import de.anomic.kelondro.kelondroBase64Order;
import de.anomic.kelondro.kelondroCache;
import de.anomic.kelondro.kelondroFlexTable;
import de.anomic.kelondro.kelondroIndex;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.kelondro.kelondroRow;
import de.anomic.kelondro.kelondroStack;
import de.anomic.kelondro.kelondroAbstractRecords;
import de.anomic.server.logging.serverLog;
import de.anomic.yacy.yacySeedDB;
@ -175,7 +175,7 @@ public class plasmaCrawlBalancer {
}
}
if (kelondroRecords.debugmode) {
if (kelondroAbstractRecords.debugmode) {
serverLog.logWarning("PLASMA BALANCER", "remove: not found urlhash " + urlhash + " in " + stackname);
}
return new plasmaCrawlEntry(entry);
@ -194,7 +194,7 @@ public class plasmaCrawlBalancer {
int componentsize = urlFileStack.size() + urlRAMStack.size() + sizeDomainStacks();
if (componentsize != urlFileIndex.size()) {
// here is urlIndexFile.size() always smaller. why?
if (kelondroRecords.debugmode) {
if (kelondroAbstractRecords.debugmode) {
serverLog.logWarning("PLASMA BALANCER", "size operation wrong in " + stackname + " - componentsize = " + componentsize + ", urlFileIndex.size() = " + urlFileIndex.size());
}
if ((componentsize == 0) && (urlFileIndex.size() > 0)) {
@ -207,8 +207,10 @@ public class plasmaCrawlBalancer {
private int sizeDomainStacks() {
if (domainStacks == null) return 0;
int sum = 0;
Iterator i = domainStacks.values().iterator();
while (i.hasNext()) sum += ((LinkedList) i.next()).size();
synchronized (domainStacks) {
Iterator i = domainStacks.values().iterator();
while (i.hasNext()) sum += ((LinkedList) i.next()).size();
}
return sum;
}
@ -451,7 +453,7 @@ public class plasmaCrawlBalancer {
String urlhash = (String) urlRAMStack.get(dist);
kelondroRow.Entry entry = urlFileIndex.get(urlhash.getBytes());
if (entry == null) {
if (kelondroRecords.debugmode) serverLog.logWarning("PLASMA BALANCER", "no entry in index for urlhash " + urlhash);
if (kelondroAbstractRecords.debugmode) serverLog.logWarning("PLASMA BALANCER", "no entry in index for urlhash " + urlhash);
return null;
}
return new plasmaCrawlEntry(entry);

@ -79,10 +79,6 @@ public class plasmaCrawlNURL {
remoteStack = new plasmaCrawlBalancer(cachePath, "urlNoticeRemoteStack");
}
public int size() {
return coreStack.size() + limitStack.size() + remoteStack.size();
}
public void close() {
coreStack.close();
limitStack.close();
@ -90,7 +86,7 @@ public class plasmaCrawlNURL {
remoteStack.close();
}
public int stackSize() {
public int size() {
// this does not count the overhang stack size
return coreStack.size() + limitStack.size() + remoteStack.size();
}

@ -144,11 +144,11 @@ import de.anomic.plasma.plasmaURL;
import de.anomic.index.indexURLEntry;
import de.anomic.kelondro.kelondroBitfield;
import de.anomic.kelondro.kelondroCache;
import de.anomic.kelondro.kelondroCachedRecords;
import de.anomic.kelondro.kelondroException;
import de.anomic.kelondro.kelondroMSetTools;
import de.anomic.kelondro.kelondroMapTable;
import de.anomic.kelondro.kelondroNaturalOrder;
import de.anomic.kelondro.kelondroRecords;
import de.anomic.net.URL;
import de.anomic.plasma.dbImport.dbImportManager;
import de.anomic.plasma.parser.ParserException;
@ -1148,7 +1148,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
long memprereq = Math.max(getConfigLong(INDEXER_MEMPREREQ, 0), wordIndex.minMem());
// setConfig(INDEXER_MEMPREREQ, memprereq);
//setThreadPerformance(INDEXER, getConfigLong(INDEXER_IDLESLEEP, 0), getConfigLong(INDEXER_BUSYSLEEP, 0), memprereq);
kelondroRecords.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
kelondroCachedRecords.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
kelondroCache.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
// make parser
@ -1620,7 +1620,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
* shutdown procedure
*/
public boolean cleanProfiles() throws InterruptedException {
if ((sbQueue.size() > 0) || (cacheLoader.size() > 0) || (noticeURL.stackSize() > 0)) return false;
if ((sbQueue.size() > 0) || (cacheLoader.size() > 0) || (noticeURL.size() > 0)) return false;
final Iterator iter = profiles.profiles(true);
plasmaCrawlProfile.entry entry;
boolean hasDoneSomething = false;
@ -2074,7 +2074,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
long memprereq = Math.max(getConfigLong(INDEXER_MEMPREREQ, 0), wordIndex.minMem());
// setConfig(INDEXER_MEMPREREQ, memprereq);
//setThreadPerformance(INDEXER, getConfigLong(INDEXER_IDLESLEEP, 0), getConfigLong(INDEXER_BUSYSLEEP, 0), memprereq);
kelondroRecords.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
kelondroCachedRecords.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
kelondroCache.setCacheGrowStati(memprereq + 4 * 1024 * 1024, memprereq + 2 * 1024 * 1024);
// update the cluster set
@ -3260,11 +3260,11 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
if (wordIndex.size() < 100) {
return "no DHT distribution: not enough words - wordIndex.size() = " + wordIndex.size();
}
if ((getConfig(INDEX_DIST_ALLOW_WHILE_CRAWLING, "false").equalsIgnoreCase("false")) && (noticeURL.stackSize() > 0)) {
return "no DHT distribution: crawl in progress: noticeURL.stackSize() = " + noticeURL.stackSize() + ", sbQueue.size() = " + sbQueue.size();
if ((getConfig(INDEX_DIST_ALLOW_WHILE_CRAWLING, "false").equalsIgnoreCase("false")) && (noticeURL.size() > 0)) {
return "no DHT distribution: crawl in progress: noticeURL.stackSize() = " + noticeURL.size() + ", sbQueue.size() = " + sbQueue.size();
}
if ((getConfig(INDEX_DIST_ALLOW_WHILE_INDEXING, "false").equalsIgnoreCase("false")) && (sbQueue.size() > 1)) {
return "no DHT distribution: indexing in progress: noticeURL.stackSize() = " + noticeURL.stackSize() + ", sbQueue.size() = " + sbQueue.size();
return "no DHT distribution: indexing in progress: noticeURL.stackSize() = " + noticeURL.size() + ", sbQueue.size() = " + sbQueue.size();
}
return null;
}

@ -115,7 +115,7 @@ public class yacyPeerActions {
seedDB.mySeed.put(yacySeed.UPTIME, Long.toString(uptime/60)); // the number of minutes that the peer is up in minutes/day (moving average MA30)
seedDB.mySeed.put(yacySeed.LCOUNT, Integer.toString(sb.wordIndex.loadedURL.size())); // the number of links that the peer has stored (LURL's)
seedDB.mySeed.put(yacySeed.NCOUNT, Integer.toString(sb.noticeURL.stackSize())); // the number of links that the peer has noticed, but not loaded (NURL's)
seedDB.mySeed.put(yacySeed.NCOUNT, Integer.toString(sb.noticeURL.size())); // the number of links that the peer has noticed, but not loaded (NURL's)
seedDB.mySeed.put(yacySeed.ICOUNT, Integer.toString(sb.wordIndex.size())); // the minimum number of words that the peer has indexed (as it says)
seedDB.mySeed.put(yacySeed.SCOUNT, Integer.toString(seedDB.sizeConnected())); // the number of seeds that the peer has stored
seedDB.mySeed.put(yacySeed.CCOUNT, Double.toString(((int) ((seedDB.sizeConnected() + seedDB.sizeDisconnected() + seedDB.sizePotential()) * 60.0 / (uptime + 1.01)) * 100) / 100.0)); // the number of clients that the peer connects (as connects/hour)

Loading…
Cancel
Save