- After the removal of the Tree class some code simplifications are possible. This mostly affects the Records class, which can be refactored; the refactoring results in a reduced number of classes.

- The EcoTable was renamed to Table.


git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@6151 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 16 years ago
parent c5122d6836
commit 9a674d8047

@ -70,7 +70,7 @@
</tr>
</table>
<p><strong>EcoTable RAM Index:</strong></p>
<p><strong>Table RAM Index:</strong></p>
<table border="0" cellpadding="2" cellspacing="1">
<tr class="TableHeader" valign="bottom">
<td rowspan="2">Table</td>

@ -31,7 +31,7 @@ import java.util.Map;
import de.anomic.http.httpRequestHeader;
import de.anomic.kelondro.index.Cache;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.kelondro.util.MemoryControl;
import de.anomic.kelondro.util.FileUtils;
import de.anomic.plasma.plasmaSwitchboard;
@ -88,15 +88,15 @@ public class PerformanceMemory_p {
prop.putNum("memoryUsedAfterInitAGC", (memoryTotalAfterInitAGC - memoryFreeAfterInitAGC) / KB);
prop.putNum("memoryUsedNow", (memoryTotalNow - memoryFreeNow) / MB);
// write table for EcoTable index sizes
Iterator<String> i = EcoTable.filenames();
// write table for Table index sizes
Iterator<String> i = Table.filenames();
String filename;
Map<String, String> map;
int p, c = 0;
long mem, totalmem = 0;
while (i.hasNext()) {
filename = i.next();
map = EcoTable.memoryStats(filename);
map = Table.memoryStats(filename);
prop.put("EcoList_" + c + "_tableIndexPath", ((p = filename.indexOf("DATA")) < 0) ? filename : filename.substring(p));
prop.putNum("EcoList_" + c + "_tableSize", map.get("tableSize"));

@ -19,7 +19,7 @@ import de.anomic.kelondro.index.ObjectArrayCache;
import de.anomic.kelondro.order.Base64Order;
import de.anomic.kelondro.order.CloneableIterator;
import de.anomic.kelondro.order.NaturalOrder;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.kelondro.table.SQLTable;
import de.anomic.kelondro.table.SplitTable;
import de.anomic.kelondro.util.MemoryControl;
@ -199,7 +199,7 @@ public class dbtest {
return new SplitTable(tablepath, new File(tablename).getName(), testRow, true);
}
if (dbe.equals("kelondroEcoTable")) {
return new EcoTable(new File(tablename), testRow, EcoTable.tailCacheForceUsage, 1000, 0);
return new Table(new File(tablename), testRow, Table.tailCacheForceUsage, 1000, 0);
}
if (dbe.equals("mysql")) {
return new SQLTable("mysql", testRow);

@ -36,7 +36,7 @@ import java.util.concurrent.ConcurrentLinkedQueue;
import de.anomic.kelondro.index.Row;
import de.anomic.kelondro.index.ObjectIndex;
import de.anomic.kelondro.order.CloneableIterator;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.yacy.logging.Log;
public class Balancer {
@ -71,7 +71,7 @@ public class Balancer {
if (!(cachePath.exists())) cachePath.mkdir(); // make the path
cacheStacksPath.mkdirs();
File f = new File(cacheStacksPath, stackname + indexSuffix);
urlFileIndex = new EcoTable(f, CrawlEntry.rowdef, (fullram) ? EcoTable.tailCacheUsageAuto : EcoTable.tailCacheDenyUsage, EcoFSBufferSize, 0);
urlFileIndex = new Table(f, CrawlEntry.rowdef, (fullram) ? Table.tailCacheUsageAuto : Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
profileErrors = 0;
lastDomainStackFill = 0;
Log.logInfo("Balancer", "opened balancer file with " + urlFileIndex.size() + " entries from " + f.toString());

@ -36,7 +36,7 @@ import de.anomic.kelondro.index.Row;
import de.anomic.kelondro.index.RowSet;
import de.anomic.kelondro.index.ObjectIndex;
import de.anomic.kelondro.order.Base64Order;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.kelondro.table.SplitTable;
import de.anomic.kelondro.util.FileUtils;
import de.anomic.yacy.yacySeedDB;
@ -70,7 +70,7 @@ public class ZURL {
if (f.isDirectory()) SplitTable.delete(cachePath, tablename); else FileUtils.deletedelete(f);
}
}
this.urlIndex = new EcoTable(f, rowdef, EcoTable.tailCacheDenyUsage, EcoFSBufferSize, 0);
this.urlIndex = new Table(f, rowdef, Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
//urlIndex = new kelondroFlexTable(cachePath, tablename, -1, rowdef, 0, true);
this.stack = new LinkedList<String>();
}

@ -1,42 +0,0 @@
// kelondroRecords.java
// (C) 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 03.08.2007 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro.io;
import java.io.IOException;
import de.anomic.kelondro.table.RecordHandle;
import de.anomic.kelondro.table.Node;
/**
 * RandomAccessRecords is the minimal factory contract of a record file:
 * an implementation creates {@link Node} views on records addressed by a
 * {@link RecordHandle}, optionally initialized from a bulk-read byte array.
 */
public interface RandomAccessRecords {
// this is now implemented by kelondroTray
// the newNode method is used to define an enumeration in kelondroTray, but is still there abstract
// the real implementation is done in kelondroEcoRecords and kelondroCachedRecords
public Node newNode(RecordHandle handle, byte[] bulk, int offset) throws IOException;
}

@ -1,279 +0,0 @@
// kelondroEcoRecords.java
// (C) 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 03.07.2007 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro.table;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.TreeMap;
import de.anomic.kelondro.index.Row;
import de.anomic.kelondro.io.RandomAccessInterface;
import de.anomic.kelondro.util.kelondroException;
/**
 * FullRecords is a file-backed record store built on AbstractRecords.
 * Every open instance is registered in a static tracker keyed by file name,
 * so all open record files can be enumerated via {@link #filenames()}.
 * Records are exposed through the inner {@link EcoNode} class, which lazily
 * reads the row body from the file and writes back only changed chunks on commit.
 */
public class FullRecords extends AbstractRecords {
// static supervision objects: recognize and coordinate all activities
private static TreeMap<String, FullRecords> recordTracker = new TreeMap<String, FullRecords>();
// opens (or creates) a record file on disk and registers it in the tracker
public FullRecords(
final File file,
final short ohbytec, final short ohhandlec,
final Row rowdef, final int FHandles, final int txtProps, final int txtPropWidth) throws IOException {
super(file, true, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth);
recordTracker.put(this.filename, this);
}
// wraps an already-open random-access file and registers it in the tracker
public FullRecords(
final RandomAccessInterface ra, final String filename,
final short ohbytec, final short ohhandlec,
final Row rowdef, final int FHandles, final int txtProps, final int txtPropWidth,
final boolean exitOnFail) {
super(ra, filename, true, ohbytec, ohhandlec, rowdef, FHandles, txtProps, txtPropWidth, exitOnFail);
recordTracker.put(this.filename, this);
}
public static final Iterator<String> filenames() {
// iterates string objects; all file names from record tracker
return recordTracker.keySet().iterator();
}
// deletes the record addressed by the handle (delegates to the superclass)
protected synchronized void deleteNode(final RecordHandle handle) throws IOException {
super.deleteNode(handle);
}
// closes the backing file and removes this instance from the tracker
public synchronized void close() {
super.close();
recordTracker.remove(this.filename);
}
// factory method required by the records interface; see EcoNode below
public Node newNode(final RecordHandle handle, final byte[] bulk, final int offset) throws IOException {
return new EcoNode(handle, bulk, offset);
}
/**
 * EcoNode is the Node implementation for FullRecords. It splits a record
 * into an overhead chunk (ohChunk) and a row-body chunk (bodyChunk) and
 * tracks per-chunk dirty flags so commit() writes only what changed.
 */
public final class EcoNode implements Node {
private RecordHandle handle = null; // index of the entry, by default NUL means undefined
private byte[] ohChunk = null; // contains overhead values
private byte[] bodyChunk = null; // contains all row values
private boolean ohChanged = false;
private boolean bodyChanged = false;
// creates a NEW node: allocates a payload slot in the file for rowinstance
public EcoNode(final byte[] rowinstance) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
assert ((rowinstance == null) || (rowinstance.length == ROW.objectsize)) : "bulkchunk.length = " + (rowinstance == null ? "null" : rowinstance.length) + ", ROW.width(0) = " + ROW.width(0);
this.handle = new RecordHandle(USAGE.allocatePayload(rowinstance));
// create chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize];
// 0xff is the fill pattern for unset overhead/body bytes
for (int i = this.ohChunk.length - 1; i >= 0; i--) this.ohChunk[i] = (byte) 0xff;
if (rowinstance == null) {
for (int i = this.bodyChunk.length - 1; i >= 0; i--) this.bodyChunk[i] = (byte) 0xff;
} else {
System.arraycopy(rowinstance, 0, this.bodyChunk, 0, this.bodyChunk.length);
}
// mark chunks as not changed, we wrote that already during allocatePayload
this.ohChanged = false;
this.bodyChanged = false;
}
// creates a node from a bulk-read byte array at a given handle position
public EcoNode(final RecordHandle handle, final byte[] bulkchunk, final int offset) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
// if write is true, then the chunk in bulkchunk is written to the file
// otherwise it is considered equal to what is stored in the file
// (that is ensured during pre-loaded enumeration)
this.handle = handle;
boolean changed;
if (handle.index >= USAGE.allCount()) {
// this causes only a write action if we create a node beyond the end of the file
USAGE.allocateRecord(handle.index, bulkchunk, offset);
changed = false; // we have already written the record, so it is considered as unchanged
} else {
changed = true;
}
assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = " + (bulkchunk == null ? "null" : bulkchunk.length) + ", offset = " + offset + ", recordsize = " + recordsize;
/*if ((offset == 0) && (overhead == 0) && ((bulkchunk == null) || (bulkchunk.length == ROW.objectsize()))) {
this.ohChunk = new byte[0];
if (bulkchunk == null) {
this.bodyChunk = new byte[ROW.objectsize()];
} else {
this.bodyChunk = bulkchunk;
}
} else { */
// create empty chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize];
// write content to chunks
if (bulkchunk != null) {
if (overhead > 0) System.arraycopy(bulkchunk, offset, this.ohChunk, 0, overhead);
System.arraycopy(bulkchunk, offset + overhead, this.bodyChunk, 0, ROW.objectsize);
}
//}
// mark chunks as changed
this.ohChanged = changed;
this.bodyChanged = changed;
}
// opens an EXISTING node: reads only the overhead; the body is loaded lazily
public EcoNode(final RecordHandle handle) throws IOException {
// this creates an entry with a pre-reserved entry position.
// values can be written using the setValues() method,
// but we expect that values are already there in the file.
assert (handle != null): "node handle is null";
assert (handle.index >= 0): "node handle too low: " + handle.index;
if (handle == null) throw new kelondroException(filename, "INTERNAL ERROR: node handle is null.");
if (handle.index >= USAGE.allCount()) {
throw new kelondroException(filename, "INTERNAL ERROR, Node/init: node handle index " + handle.index + " exceeds size. No auto-fix node was submitted. This is a serious failure.");
}
// use given handle
this.handle = new RecordHandle(handle.index);
// read record
this.ohChunk = new byte[overhead];
if (overhead > 0) entryFile.readFully(seekpos(this.handle), this.ohChunk, 0, overhead);
// bodyChunk stays null here; getKey()/getValueRow() load it on demand
this.bodyChunk = null; /*new byte[ROW.objectsize];
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
*/
// mark chunks as not changed
this.ohChanged = false;
this.bodyChanged = false;
}
public RecordHandle handle() {
// if this entry has an index, return it
if (this.handle.index == RecordHandle.NUL) throw new kelondroException(filename, "the entry has no index assigned");
return this.handle;
}
// sets one overhead byte and marks the overhead chunk dirty
public void setOHByte(final int i, final byte b) {
if (i >= OHBYTEC) throw new IllegalArgumentException("setOHByte: wrong index " + i);
if (this.handle.index == RecordHandle.NUL) throw new kelondroException(filename, "setOHByte: no handle assigned");
this.ohChunk[i] = b;
this.ohChanged = true;
}
// stores another record's handle (or NUL for null) into the overhead chunk
public void setOHHandle(final int i, final RecordHandle otherhandle) {
assert (i < OHHANDLEC): "setOHHandle: wrong array size " + i;
assert (this.handle.index != RecordHandle.NUL): "setOHHandle: no handle assigned ind file" + filename;
if (otherhandle == null) {
NUL2bytes(this.ohChunk, OHBYTEC + 4 * i);
} else {
if (otherhandle.index >= USAGE.allCount()) throw new kelondroException(filename, "INTERNAL ERROR, setOHHandles: handle " + i + " exceeds file size (" + handle.index + " >= " + USAGE.allCount() + ")");
int2bytes(otherhandle.index, this.ohChunk, OHBYTEC + 4 * i);
}
this.ohChanged = true;
}
public byte getOHByte(final int i) {
if (i >= OHBYTEC) throw new IllegalArgumentException("getOHByte: wrong index " + i);
if (this.handle.index == RecordHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
return this.ohChunk[i];
}
// reads a handle back from the overhead chunk; NUL is decoded as null
public RecordHandle getOHHandle(final int i) {
if (this.handle.index == RecordHandle.NUL) throw new kelondroException(filename, "Cannot load OH values");
assert (i < OHHANDLEC): "handle index out of bounds: " + i + " in file " + filename;
final int h = bytes2int(this.ohChunk, OHBYTEC + 4 * i);
return (h == RecordHandle.NUL) ? null : new RecordHandle(h);
}
public synchronized void setValueRow(final byte[] row) throws IOException {
// if the index is defined, then write values directly to the file, else only to the object
if ((row != null) && (row.length != ROW.objectsize)) throw new IOException("setValueRow with wrong (" + row.length + ") row length instead correct: " + ROW.objectsize);
// set values
if (this.handle.index != RecordHandle.NUL) {
this.bodyChunk = row;
this.bodyChanged = true;
}
}
public synchronized boolean valid() {
// returns true if the key starts with non-zero byte
// this may help to detect deleted entries
// an unloaded body (null) is treated as valid
return this.bodyChunk == null || (this.bodyChunk[0] != 0) && ((this.bodyChunk[0] != -128) || (this.bodyChunk[1] != 0));
}
public synchronized byte[] getKey() throws IOException {
// read key
if (this.bodyChunk == null) {
// load all values from the database file
this.bodyChunk = new byte[ROW.objectsize];
// read values
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
}
// the key is the first column of the row
return trimCopy(this.bodyChunk, 0, ROW.width(0));
}
public synchronized byte[] getValueRow() throws IOException {
if (this.bodyChunk == null) {
// load all values from the database file
this.bodyChunk = new byte[ROW.objectsize];
// read values
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
}
return this.bodyChunk;
}
public synchronized void commit() throws IOException {
// this must be called after all write operations to the node are finished
// place the data to the file
final boolean doCommit = this.ohChanged || this.bodyChanged;
// save head
synchronized (entryFile) {
if (this.ohChanged) {
//System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
assert (ohChunk == null) || (ohChunk.length == overhead);
entryFile.write(seekpos(this.handle), (this.ohChunk == null) ? new byte[overhead] : this.ohChunk);
this.ohChanged = false;
}
// save tail
if ((this.bodyChunk != null) && (this.bodyChanged)) {
//System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
assert (this.bodyChunk == null) || (this.bodyChunk.length == ROW.objectsize);
entryFile.write(seekpos(this.handle) + overhead, (this.bodyChunk == null) ? new byte[ROW.objectsize] : this.bodyChunk);
this.bodyChanged = false;
}
if (doCommit) entryFile.commit();
}
}
}
}

@ -1,46 +0,0 @@
// kelondroNode.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro.table;
import java.io.IOException;
/**
 * Node is the common view on a single record of a kelondro record file.
 * A node consists of overhead values (bytes and handles, used for linking
 * records) and a value row (the payload). Implementations may buffer writes;
 * {@link #commit()} flushes pending changes to the backing file.
 */
public interface Node {
// the storage position of this node within the record file
public RecordHandle handle();
// overhead access: single bytes and handles to other records
public void setOHByte(int i, byte b);
public void setOHHandle(int i, RecordHandle otherhandle);
public byte getOHByte(int i);
public RecordHandle getOHHandle(int i);
// writes all pending changes of this node to the backing file
public void commit() throws IOException;
// payload access: the full value row and its key (first column)
public void setValueRow(byte[] row) throws IOException;
public boolean valid();
public byte[] getKey() throws IOException;
public byte[] getValueRow() throws IOException;
public String toString();
}

@ -1,87 +0,0 @@
// kelondroHandle.java
// (C) 2003 - 2007 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2003 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2006-04-02 22:40:07 +0200 (So, 02 Apr 2006) $
// $LastChangedRevision: 1986 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.kelondro.table;
public class RecordHandle implements Comparable<RecordHandle> {
public final static int NUL = Integer.MIN_VALUE; // the meta value for the kelondroTray' NUL abstraction
protected int index;
protected RecordHandle(final int i) {
assert i != 1198412402;
assert (i == NUL) || (i >= 0) : "node handle index too low: " + i;
//assert (i == NUL) || (i < USAGE.allCount()) : "node handle index too high: " + i + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;
this.index = i;
//if ((USAGE != null) && (this.index != NUL)) USAGE.allocate(this.index);
}
public boolean isNUL() {
return index == NUL;
}
public String toString() {
if (index == NUL) return "NULL";
String s = Integer.toHexString(index);
while (s.length() < 4) s = "0" + s;
return s;
}
public boolean equals(final RecordHandle h) {
assert (index != NUL);
assert (h.index != NUL);
return (this.index == h.index);
}
public boolean equals(final Object h) {
assert (index != NUL);
assert (h instanceof RecordHandle && ((RecordHandle) h).index != NUL);
return (h instanceof RecordHandle && this.index == ((RecordHandle) h).index);
}
public int compare(final RecordHandle h0, final RecordHandle h1) {
assert ((h0).index != NUL);
assert ((h1).index != NUL);
if ((h0).index < (h1).index) return -1;
if ((h0).index > (h1).index) return 1;
return 0;
}
public int compareTo(final RecordHandle h) {
// this is needed for a TreeMap
assert (index != NUL) : "this.index is NUL in compareTo";
assert ((h).index != NUL) : "handle.index is NUL in compareTo";
if (index < (h).index) return -1;
if (index > (h).index) return 1;
return 0;
}
public int hashCode() {
assert (index != NUL);
return this.index;
}
}

@ -46,14 +46,13 @@ import de.anomic.kelondro.io.FileRandomAccess;
import de.anomic.kelondro.io.IOChunksInterface;
import de.anomic.kelondro.io.RandomAccessInterface;
import de.anomic.kelondro.io.RandomAccessIOChunks;
import de.anomic.kelondro.io.RandomAccessRecords;
import de.anomic.kelondro.order.ByteOrder;
import de.anomic.kelondro.order.NaturalOrder;
import de.anomic.kelondro.util.FileUtils;
import de.anomic.kelondro.util.kelondroException;
import de.anomic.yacy.logging.Log;
public abstract class AbstractRecords implements RandomAccessRecords {
public class Records {
private static final boolean useChannel = false;
@ -100,7 +99,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
protected short OHBYTEC; // number of extra bytes in each node
protected short OHHANDLEC; // number of handles in each node
protected Row ROW; // array with widths of columns
private RecordHandle HANDLES[]; // array with handles
private Handle HANDLES[]; // array with handles
private byte[] TXTPROPS[]; // array with text properties
private int TXTPROPW; // size of a single TXTPROPS element
@ -122,13 +121,13 @@ public abstract class AbstractRecords implements RandomAccessRecords {
protected final class usageControl {
protected int USEDC; // counter of used elements
protected int FREEC; // counter of free elements in list of free Nodes
protected RecordHandle FREEH; // pointer to first element in list of free Nodes, empty = NUL
protected Handle FREEH; // pointer to first element in list of free Nodes, empty = NUL
protected usageControl(final boolean init) throws IOException {
if (init) {
this.USEDC = 0;
this.FREEC = 0;
this.FREEH = new RecordHandle(RecordHandle.NUL);
this.FREEH = new Handle(Handle.NUL);
} else {
readusedfree();
try {
@ -176,13 +175,13 @@ public abstract class AbstractRecords implements RandomAccessRecords {
int freeh = entryFile.readInt(POS_FREEH);
if (freeh > this.USEDC) {
logFailure("INCONSISTENCY in FREEH reading: USEDC = " + this.USEDC + ", FREEC = " + this.FREEC + ", this.FREEH = " + freeh + ", file = " + filename);
this.FREEH = new RecordHandle(RecordHandle.NUL);
this.FREEH = new Handle(Handle.NUL);
this.FREEC = 0;
entryFile.writeInt(POS_FREEC, FREEC);
entryFile.writeInt(POS_FREEH, FREEH.index);
entryFile.commit();
} else {
this.FREEH = new RecordHandle(freeh);
this.FREEH = new Handle(freeh);
this.FREEC = entryFile.readInt(POS_FREEC);
}
}
@ -198,12 +197,12 @@ public abstract class AbstractRecords implements RandomAccessRecords {
return this.USEDC;
}
protected synchronized void dispose(final RecordHandle h) throws IOException {
protected synchronized void dispose(final Handle h) throws IOException {
// delete element with handle h
// this element is then connected to the deleted-chain and can be
// re-used change counter
assert (h.index >= 0);
assert (h.index != RecordHandle.NUL);
assert (h.index != Handle.NUL);
synchronized (USAGE) {
synchronized (entryFile) {
assert (h.index < USEDC + FREEC) : "USEDC = " + USEDC + ", FREEC = " + FREEC + ", h.index = " + h.index;
@ -245,7 +244,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
USAGE.FREEC--;
// take link
int index = USAGE.FREEH.index;
if (index == RecordHandle.NUL) {
if (index == Handle.NUL) {
Log.logSevere("kelondroAbstractRecords/" + filename, "INTERNAL ERROR (DATA INCONSISTENCY): re-use of records failed, lost " + (USAGE.FREEC + 1) + " records.");
// try to heal..
USAGE.USEDC = (int) ((entryFile.length() - POS_NODES) / recordsize);
@ -262,12 +261,12 @@ public abstract class AbstractRecords implements RandomAccessRecords {
index = USAGE.allCount(); // a place at the end of the file
USAGE.USEDC += USAGE.FREEC; // to avoid that non-empty records at the end are overwritten
USAGE.FREEC = 0; // discard all possible empty nodes
USAGE.FREEH.index = RecordHandle.NUL;
USAGE.FREEH.index = Handle.NUL;
} else {
// read link to next element of FREEH chain
USAGE.FREEH.index = entryFile.readInt(seekp);
// check consistency
if (((USAGE.FREEH.index != RecordHandle.NUL) || (USAGE.FREEC != 0)) && seekpos(USAGE.FREEH) >= entryFile.length()) {
if (((USAGE.FREEH.index != Handle.NUL) || (USAGE.FREEC != 0)) && seekpos(USAGE.FREEH) >= entryFile.length()) {
// the FREEH pointer cannot be correct, because it points to a place outside of the file.
// to correct this, we reset the FREH pointer and return a index that has been calculated as if USAGE.FREE == 0
Log.logSevere("kelondroAbstractRecords/" + filename, "INTERNAL ERROR (DATA INCONSISTENCY): USAGE.FREEH.index = " + USAGE.FREEH.index + ", entryFile.length() = " + entryFile.length() + "; wrong FREEH has been patched, lost " + (USAGE.FREEC + 1) + " records.");
@ -315,14 +314,14 @@ public abstract class AbstractRecords implements RandomAccessRecords {
} else {
// write beyond the end of the file
// records that are in between are marked as deleted
RecordHandle h;
Handle h;
while (index > USAGE.allCount()) {
h = new RecordHandle(USAGE.allCount());
h = new Handle(USAGE.allCount());
USAGE.FREEC++;
entryFile.writeSpace(seekpos(h), overhead + ROW.objectsize); // occupy space, otherwise the USAGE computation does not work
entryFile.writeInt(seekpos(h), USAGE.FREEH.index);
USAGE.FREEH = h;
assert ((USAGE.FREEH.index == RecordHandle.NUL) && (USAGE.FREEC == 0)) || seekpos(USAGE.FREEH) < entryFile.length() : "allocateRecord: USAGE.FREEH.index = " + USAGE.FREEH.index;
assert ((USAGE.FREEH.index == Handle.NUL) && (USAGE.FREEC == 0)) || seekpos(USAGE.FREEH) < entryFile.length() : "allocateRecord: USAGE.FREEH.index = " + USAGE.FREEH.index;
USAGE.writefree();
entryFile.commit();
}
@ -369,9 +368,10 @@ public abstract class AbstractRecords implements RandomAccessRecords {
}
}
public AbstractRecords(final File file, final boolean useNodeCache,
public Records(final File file,
final short ohbytec, final short ohhandlec,
final Row rowdef, final int FHandles, final int txtProps, final int txtPropWidth) throws IOException {
final Row rowdef, final int FHandles,
final int txtProps, final int txtPropWidth) throws IOException {
// opens an existing file or creates a new file
// file: the file that shall be created
// oha : overhead size array of four bytes: oha[0]=# of bytes, oha[1]=# of shorts, oha[2]=# of ints, oha[3]=# of longs,
@ -393,7 +393,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
//kelondroRA raf = new kelondroCachedRA(new kelondroFileRA(this.filename), 5000000, 1000);
//kelondroRA raf = new kelondroNIOFileRA(this.filename, (file.length() < 4000000), 10000);
//raf = new kelondroCachedRA(raf);
initExistingFile(raf, useNodeCache);
initExistingFile(raf, true);
} else {
this.filename = file.getCanonicalPath();
final RandomAccessInterface raf = (useChannel) ? new ChannelRandomAccess(new File(this.filename)) : new FileRandomAccess(new File(this.filename));
@ -414,7 +414,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
}
}
public AbstractRecords(final RandomAccessInterface ra, final String filename, final boolean useCache,
public Records(final RandomAccessInterface ra, final String filename, final boolean useCache,
final short ohbytec, final short ohhandlec,
final Row rowdef, final int FHandles, final int txtProps, final int txtPropWidth,
final boolean exitOnFail) {
@ -465,8 +465,8 @@ public abstract class AbstractRecords implements RandomAccessRecords {
// store dynamic back-up variables
USAGE = new usageControl(true);
HANDLES = new RecordHandle[FHandles];
for (int i = 0; i < FHandles; i++) HANDLES[i] = new RecordHandle(RecordHandle.NUL);
HANDLES = new Handle[FHandles];
for (int i = 0; i < FHandles; i++) HANDLES[i] = new Handle(Handle.NUL);
TXTPROPS = new byte[txtProps][];
for (int i = 0; i < txtProps; i++) TXTPROPS[i] = new byte[0];
@ -493,8 +493,8 @@ public abstract class AbstractRecords implements RandomAccessRecords {
entryFile.writeInt(POS_COLWIDTHS + 4 * i, this.ROW.width(i));
}
for (int i = 0; i < this.HANDLES.length; i++) {
entryFile.writeInt(POS_HANDLES + 4 * i, RecordHandle.NUL);
HANDLES[i] = new RecordHandle(RecordHandle.NUL);
entryFile.writeInt(POS_HANDLES + 4 * i, Handle.NUL);
HANDLES[i] = new Handle(Handle.NUL);
}
final byte[] ea = new byte[TXTPROPW];
for (int j = 0; j < TXTPROPW; j++) ea[j] = 0;
@ -543,7 +543,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
this.theLogger.fine("KELONDRO DEBUG " + this.filename + ": " + message);
}
public AbstractRecords(final RandomAccessInterface ra, final String filename, final boolean useNodeCache) throws IOException{
public Records(final RandomAccessInterface ra, final String filename, final boolean useNodeCache) throws IOException{
this.fileExisted = false;
this.filename = filename;
initExistingFile(ra, useNodeCache);
@ -566,7 +566,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
this.OHHANDLEC = entryFile.readShort(POS_OHHANDLEC);
final Column[] COLDEFS = new Column[entryFile.readShort(POS_COLUMNS)];
this.HANDLES = new RecordHandle[entryFile.readInt(POS_INTPROPC)];
this.HANDLES = new Handle[entryFile.readInt(POS_INTPROPC)];
this.TXTPROPS = new byte[entryFile.readInt(POS_TXTPROPC)][];
this.TXTPROPW = entryFile.readInt(POS_TXTPROPW);
@ -583,7 +583,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
COLDEFS[i] = new Column("col-" + i, Column.celltype_binary, Column.encoder_bytes, entryFile.readInt(POS_COLWIDTHS + 4 * i), "");
}
for (int i = 0; i < HANDLES.length; i++) {
HANDLES[i] = new RecordHandle(entryFile.readInt(POS_HANDLES + 4 * i));
HANDLES[i] = new Handle(entryFile.readInt(POS_HANDLES + 4 * i));
}
for (int i = 0; i < TXTPROPS.length; i++) {
TXTPROPS[i] = new byte[TXTPROPW];
@ -635,7 +635,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
return bulk;
}
protected synchronized void deleteNode(final RecordHandle handle) throws IOException {
protected synchronized void deleteNode(final Handle handle) throws IOException {
USAGE.dispose(handle);
}
@ -669,7 +669,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
this.ROW = rowdef;
}
protected final long seekpos(final RecordHandle handle) {
protected final long seekpos(final Handle handle) {
assert (handle.index >= 0): "handle index too low: " + handle.index;
return POS_NODES + ((long) recordsize * (long) handle.index);
}
@ -684,16 +684,16 @@ public abstract class AbstractRecords implements RandomAccessRecords {
return this.HANDLES.length;
}
protected final void setHandle(final int pos, RecordHandle handle) throws IOException {
protected final void setHandle(final int pos, Handle handle) throws IOException {
if (pos >= HANDLES.length) throw new IllegalArgumentException("setHandle: handle array exceeded");
if (handle == null) handle = new RecordHandle(RecordHandle.NUL);
if (handle == null) handle = new Handle(Handle.NUL);
HANDLES[pos] = handle;
entryFile.writeInt(POS_HANDLES + 4L * pos, handle.index);
}
protected final RecordHandle getHandle(final int pos) {
protected final Handle getHandle(final int pos) {
if (pos >= HANDLES.length) throw new IllegalArgumentException("getHandle: handle array exceeded");
return (HANDLES[pos].index == RecordHandle.NUL) ? null : HANDLES[pos];
return (HANDLES[pos].index == Handle.NUL) ? null : HANDLES[pos];
}
// custom texts
@ -724,24 +724,24 @@ public abstract class AbstractRecords implements RandomAccessRecords {
return USAGE.FREEC;
}
protected final Set<RecordHandle> deletedHandles(final long maxTime) throws IOException {
protected final Set<Handle> deletedHandles(final long maxTime) throws IOException {
// initialize set with deleted nodes; the set contains Handle-Objects
// this may last only the given maxInitTime
// if the initTime is exceeded, the method returns what it found so far
final TreeSet<RecordHandle> markedDeleted = new TreeSet<RecordHandle>();
final TreeSet<Handle> markedDeleted = new TreeSet<Handle>();
final long timeLimit = (maxTime < 0) ? Long.MAX_VALUE : System.currentTimeMillis() + maxTime;
long seekp;
synchronized (USAGE) {
if (USAGE.FREEC != 0) {
RecordHandle h = USAGE.FREEH;
Handle h = USAGE.FREEH;
long repair_position = POS_FREEH;
while (h.index != RecordHandle.NUL) {
while (h.index != Handle.NUL) {
// check handle
seekp = seekpos(h);
if (seekp > entryFile.length()) {
// repair last handle store position
this.theLogger.severe("KELONDRO WARNING " + this.filename + ": seek position " + seekp + "/" + h.index + " out of file size " + entryFile.length() + "/" + ((entryFile.length() - POS_NODES) / recordsize) + " after " + markedDeleted.size() + " iterations; patched wrong node");
entryFile.writeInt(repair_position, RecordHandle.NUL);
entryFile.writeInt(repair_position, Handle.NUL);
return markedDeleted;
}
@ -750,14 +750,14 @@ public abstract class AbstractRecords implements RandomAccessRecords {
// move to next handle
repair_position = seekp;
h = new RecordHandle(entryFile.readInt(seekp));
if (h.index == RecordHandle.NUL) break;
h = new Handle(entryFile.readInt(seekp));
if (h.index == Handle.NUL) break;
// double-check for already stored handles: detect loops
if (markedDeleted.contains(h)) {
// loop detection
this.theLogger.severe("KELONDRO WARNING " + this.filename + ": FREE-Queue contains loops");
entryFile.writeInt(repair_position, RecordHandle.NUL);
entryFile.writeInt(repair_position, Handle.NUL);
return markedDeleted;
}
@ -811,10 +811,10 @@ public abstract class AbstractRecords implements RandomAccessRecords {
}
public final static void NUL2bytes(final byte[] b, final int offset) {
b[offset ] = (byte) (0XFF & (RecordHandle.NUL >> 24));
b[offset + 1] = (byte) (0XFF & (RecordHandle.NUL >> 16));
b[offset + 2] = (byte) (0XFF & (RecordHandle.NUL >> 8));
b[offset + 3] = (byte) (0XFF & RecordHandle.NUL);
b[offset ] = (byte) (0XFF & (Handle.NUL >> 24));
b[offset + 1] = (byte) (0XFF & (Handle.NUL >> 16));
b[offset + 2] = (byte) (0XFF & (Handle.NUL >> 8));
b[offset + 3] = (byte) (0XFF & Handle.NUL);
}
public final static void int2bytes(final long i, final byte[] b, final int offset) {
@ -858,7 +858,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
System.out.println(" USEDC : " + USAGE.used());
System.out.println(" FREEC : " + USAGE.FREEC);
System.out.println(" FREEH : " + USAGE.FREEH.toString());
System.out.println(" NUL repres.: 0x" + Integer.toHexString(RecordHandle.NUL));
System.out.println(" NUL repres.: 0x" + Integer.toHexString(Handle.NUL));
System.out.println(" Data Offset: 0x" + Long.toHexString(POS_NODES));
System.out.println("--");
System.out.println("RECORDS");
@ -869,9 +869,9 @@ public abstract class AbstractRecords implements RandomAccessRecords {
System.out.println(" Recordsize : " + this.recordsize + " bytes");
System.out.println("--");
System.out.println("DELETED HANDLES");
final Set<RecordHandle> dh = deletedHandles(-1);
final Iterator<RecordHandle> dhi = dh.iterator();
RecordHandle h;
final Set<Handle> dh = deletedHandles(-1);
final Iterator<Handle> dhi = dh.iterator();
Handle h;
while (dhi.hasNext()) {
h = dhi.next();
System.out.print(h.index + ", ");
@ -949,8 +949,8 @@ public abstract class AbstractRecords implements RandomAccessRecords {
// all records that are marked as deleted are omitted
// this is probably also the fastest way to iterate all objects
private final Set<RecordHandle> markedDeleted;
private final RecordHandle pos;
private final Set<Handle> markedDeleted;
private final Handle pos;
private final byte[] bulk;
private final int bulksize;
private int bulkstart; // the offset of the bulk array to the node position
@ -963,7 +963,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
fullyMarked = (maxInitTime < 0);
// seek first position according the delete node set
pos = new RecordHandle(0);
pos = new Handle(0);
while ((markedDeleted.contains(pos)) && (pos.index < USAGE.allCount())) pos.index++;
// initialize bulk
@ -990,7 +990,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
public Node next0() {
// read Objects until a non-deleted Node appears
while (hasNext0()) {
Node nn;
Node nn;
try {
nn = next00();
} catch (final IOException e) {
@ -1034,7 +1034,7 @@ public abstract class AbstractRecords implements RandomAccessRecords {
POS_NODES = 302, bulkstart = 820, recordsize = 2621466
POS_NODES = 302, bulkstart = 13106, recordsize = 163866 */
// read node from bulk
final Node n = newNode(new RecordHandle(pos.index), bulk, (pos.index - bulkstart) * recordsize);
final Node n = newNode(new Handle(pos.index), bulk, (pos.index - bulkstart) * recordsize);
pos.index++;
while ((markedDeleted.contains(pos)) && (pos.index < USAGE.allCount())) pos.index++;
return n;
@ -1059,6 +1059,268 @@ public abstract class AbstractRecords implements RandomAccessRecords {
this.entryFile.deleteOnExit();
}
public abstract Node newNode(RecordHandle handle, byte[] bulk, int offset) throws IOException;
public Node newNode(final Handle handle, final byte[] bulk, final int offset) throws IOException {
    // factory method: wrap the bulk-read chunk at the given offset into a Node
    final Node node = new Node(handle, bulk, offset);
    return node;
}
public final class Node {
private Handle handle = null; // index of the entry, by default NUL means undefined
private byte[] ohChunk = null; // contains overhead values
private byte[] bodyChunk = null; // contains all row values
private boolean ohChanged = false;
private boolean bodyChanged = false;
public Node(final byte[] rowinstance) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
assert ((rowinstance == null) || (rowinstance.length == ROW.objectsize)) : "bulkchunk.length = " + (rowinstance == null ? "null" : rowinstance.length) + ", ROW.width(0) = " + ROW.width(0);
this.handle = new Handle(USAGE.allocatePayload(rowinstance));
// create chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize];
for (int i = this.ohChunk.length - 1; i >= 0; i--) this.ohChunk[i] = (byte) 0xff;
if (rowinstance == null) {
for (int i = this.bodyChunk.length - 1; i >= 0; i--) this.bodyChunk[i] = (byte) 0xff;
} else {
System.arraycopy(rowinstance, 0, this.bodyChunk, 0, this.bodyChunk.length);
}
// mark chunks as not changed, we wrote that already during allocatePayload
this.ohChanged = false;
this.bodyChanged = false;
}
public Node(final Handle handle, final byte[] bulkchunk, final int offset) throws IOException {
// this initializer is used to create nodes from bulk-read byte arrays
// if write is true, then the chunk in bulkchunk is written to the file
// othervise it is considered equal to what is stored in the file
// (that is ensured during pre-loaded enumeration)
this.handle = handle;
boolean changed;
if (handle.index >= USAGE.allCount()) {
// this causes only a write action if we create a node beyond the end of the file
USAGE.allocateRecord(handle.index, bulkchunk, offset);
changed = false; // we have already wrote the record, so it is considered as unchanged
} else {
changed = true;
}
assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = " + (bulkchunk == null ? "null" : bulkchunk.length) + ", offset = " + offset + ", recordsize = " + recordsize;
/*if ((offset == 0) && (overhead == 0) && ((bulkchunk == null) || (bulkchunk.length == ROW.objectsize()))) {
this.ohChunk = new byte[0];
if (bulkchunk == null) {
this.bodyChunk = new byte[ROW.objectsize()];
} else {
this.bodyChunk = bulkchunk;
}
} else { */
// create empty chunks
this.ohChunk = new byte[overhead];
this.bodyChunk = new byte[ROW.objectsize];
// write content to chunks
if (bulkchunk != null) {
if (overhead > 0) System.arraycopy(bulkchunk, offset, this.ohChunk, 0, overhead);
System.arraycopy(bulkchunk, offset + overhead, this.bodyChunk, 0, ROW.objectsize);
}
//}
// mark chunks as changed
this.ohChanged = changed;
this.bodyChanged = changed;
}
public Node(final Handle handle) throws IOException {
// this creates an entry with an pre-reserved entry position.
// values can be written using the setValues() method,
// but we expect that values are already there in the file.
assert (handle != null): "node handle is null";
assert (handle.index >= 0): "node handle too low: " + handle.index;
if (handle == null) throw new kelondroException(filename, "INTERNAL ERROR: node handle is null.");
if (handle.index >= USAGE.allCount()) {
throw new kelondroException(filename, "INTERNAL ERROR, Node/init: node handle index " + handle.index + " exceeds size. No auto-fix node was submitted. This is a serious failure.");
}
// use given handle
this.handle = new Handle(handle.index);
// read record
this.ohChunk = new byte[overhead];
if (overhead > 0) entryFile.readFully(seekpos(this.handle), this.ohChunk, 0, overhead);
this.bodyChunk = null; /*new byte[ROW.objectsize];
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
*/
// mark chunks as not changed
this.ohChanged = false;
this.bodyChanged = false;
}
public Handle handle() {
// if this entry has an index, return it
if (this.handle.index == Handle.NUL) throw new kelondroException(filename, "the entry has no index assigned");
return this.handle;
}
public void setOHByte(final int i, final byte b) {
if (i >= OHBYTEC) throw new IllegalArgumentException("setOHByte: wrong index " + i);
if (this.handle.index == Handle.NUL) throw new kelondroException(filename, "setOHByte: no handle assigned");
this.ohChunk[i] = b;
this.ohChanged = true;
}
public void setOHHandle(final int i, final Handle otherhandle) {
assert (i < OHHANDLEC): "setOHHandle: wrong array size " + i;
assert (this.handle.index != Handle.NUL): "setOHHandle: no handle assigned ind file" + filename;
if (otherhandle == null) {
NUL2bytes(this.ohChunk, OHBYTEC + 4 * i);
} else {
if (otherhandle.index >= USAGE.allCount()) throw new kelondroException(filename, "INTERNAL ERROR, setOHHandles: handle " + i + " exceeds file size (" + handle.index + " >= " + USAGE.allCount() + ")");
int2bytes(otherhandle.index, this.ohChunk, OHBYTEC + 4 * i);
}
this.ohChanged = true;
}
public byte getOHByte(final int i) {
if (i >= OHBYTEC) throw new IllegalArgumentException("getOHByte: wrong index " + i);
if (this.handle.index == Handle.NUL) throw new kelondroException(filename, "Cannot load OH values");
return this.ohChunk[i];
}
public Handle getOHHandle(final int i) {
if (this.handle.index == Handle.NUL) throw new kelondroException(filename, "Cannot load OH values");
assert (i < OHHANDLEC): "handle index out of bounds: " + i + " in file " + filename;
final int h = bytes2int(this.ohChunk, OHBYTEC + 4 * i);
return (h == Handle.NUL) ? null : new Handle(h);
}
public synchronized void setValueRow(final byte[] row) throws IOException {
// if the index is defined, then write values directly to the file, else only to the object
if ((row != null) && (row.length != ROW.objectsize)) throw new IOException("setValueRow with wrong (" + row.length + ") row length instead correct: " + ROW.objectsize);
// set values
if (this.handle.index != Handle.NUL) {
this.bodyChunk = row;
this.bodyChanged = true;
}
}
public synchronized boolean valid() {
// returns true if the key starts with non-zero byte
// this may help to detect deleted entries
return this.bodyChunk == null || (this.bodyChunk[0] != 0) && ((this.bodyChunk[0] != -128) || (this.bodyChunk[1] != 0));
}
public synchronized byte[] getKey() throws IOException {
// read key
if (this.bodyChunk == null) {
// load all values from the database file
this.bodyChunk = new byte[ROW.objectsize];
// read values
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
}
return trimCopy(this.bodyChunk, 0, ROW.width(0));
}
public synchronized byte[] getValueRow() throws IOException {
if (this.bodyChunk == null) {
// load all values from the database file
this.bodyChunk = new byte[ROW.objectsize];
// read values
entryFile.readFully(seekpos(this.handle) + overhead, this.bodyChunk, 0, this.bodyChunk.length);
}
return this.bodyChunk;
}
public synchronized void commit() throws IOException {
// this must be called after all write operations to the node are finished
// place the data to the file
final boolean doCommit = this.ohChanged || this.bodyChanged;
// save head
synchronized (entryFile) {
if (this.ohChanged) {
//System.out.println("WRITEH(" + filename + ", " + seekpos(this.handle) + ", " + this.headChunk.length + ")");
assert (ohChunk == null) || (ohChunk.length == overhead);
entryFile.write(seekpos(this.handle), (this.ohChunk == null) ? new byte[overhead] : this.ohChunk);
this.ohChanged = false;
}
// save tail
if ((this.bodyChunk != null) && (this.bodyChanged)) {
//System.out.println("WRITET(" + filename + ", " + (seekpos(this.handle) + headchunksize) + ", " + this.tailChunk.length + ")");
assert (this.bodyChunk == null) || (this.bodyChunk.length == ROW.objectsize);
entryFile.write(seekpos(this.handle) + overhead, (this.bodyChunk == null) ? new byte[ROW.objectsize] : this.bodyChunk);
this.bodyChanged = false;
}
if (doCommit) entryFile.commit();
}
}
}
public class Handle implements Comparable<Handle> {
public final static int NUL = Integer.MIN_VALUE; // the meta value for the kelondroTray' NUL abstraction
protected int index;
protected Handle(final int i) {
assert i != 1198412402;
assert (i == NUL) || (i >= 0) : "node handle index too low: " + i;
//assert (i == NUL) || (i < USAGE.allCount()) : "node handle index too high: " + i + ", USEDC=" + USAGE.USEDC + ", FREEC=" + USAGE.FREEC;
this.index = i;
//if ((USAGE != null) && (this.index != NUL)) USAGE.allocate(this.index);
}
public boolean isNUL() {
return index == NUL;
}
public String toString() {
if (index == NUL) return "NULL";
String s = Integer.toHexString(index);
while (s.length() < 4) s = "0" + s;
return s;
}
public boolean equals(final Handle h) {
assert (index != NUL);
assert (h.index != NUL);
return (this.index == h.index);
}
public boolean equals(final Object h) {
assert (index != NUL);
assert (h instanceof Handle && ((Handle) h).index != NUL);
return (h instanceof Handle && this.index == ((Handle) h).index);
}
public int compare(final Handle h0, final Handle h1) {
assert ((h0).index != NUL);
assert ((h1).index != NUL);
if ((h0).index < (h1).index) return -1;
if ((h0).index > (h1).index) return 1;
return 0;
}
public int compareTo(final Handle h) {
// this is needed for a TreeMap
assert (index != NUL) : "this.index is NUL in compareTo";
assert ((h).index != NUL) : "handle.index is NUL in compareTo";
if (index < (h).index) return -1;
if (index > (h).index) return 1;
return 0;
}
public int hashCode() {
assert (index != NUL);
return this.index;
}
}
}

@ -81,14 +81,14 @@ public class Relations {
if (!list[i].equals(targetfilename)) continue;
final Row row = rowdef(list[i]);
if (row.primaryKeyLength != keysize || row.column(1).cellwidth != payloadsize) continue; // a wrong table
final ObjectIndex table = new EcoTable(new File(baseDir, list[i]), row, EcoTable.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, Table.tailCacheUsageAuto, 1024*1024, 0);
relations.put(name, table);
return;
}
}
// the relation does not exist, create it
final Row row = rowdef(keysize, payloadsize);
final ObjectIndex table = new EcoTable(new File(baseDir, targetfilename), row, EcoTable.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, targetfilename), row, Table.tailCacheUsageAuto, 1024*1024, 0);
relations.put(name, table);
}
@ -101,7 +101,7 @@ public class Relations {
for (int i = 0; i < list.length; i++) {
if (list[i].startsWith(name)) {
final Row row = rowdef(list[i]);
final ObjectIndex table = new EcoTable(new File(baseDir, list[i]), row, EcoTable.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, Table.tailCacheUsageAuto, 1024*1024, 0);
relations.put(name, table);
return table;
}

@ -164,7 +164,7 @@ public class SplitTable implements ObjectIndex {
maxtime = time;
}
ram = EcoTable.staticRAMIndexNeed(f, rowdef);
ram = Table.staticRAMIndexNeed(f, rowdef);
if (ram > 0) {
t.put(tablefile[i], Long.valueOf(ram));
sum += ram;
@ -197,7 +197,7 @@ public class SplitTable implements ObjectIndex {
if (maxf != null) {
f = new File(path, maxf);
Log.logInfo("kelondroSplitTable", "opening partial eco table " + f);
table = new EcoTable(f, rowdef, EcoTable.tailCacheUsageAuto, EcoFSBufferSize, 0);
table = new Table(f, rowdef, Table.tailCacheUsageAuto, EcoFSBufferSize, 0);
tables.put(maxf, table);
}
}
@ -267,7 +267,7 @@ public class SplitTable implements ObjectIndex {
private ObjectIndex newTable() {
this.current = newFilename();
final File f = new File(path, this.current);
EcoTable table = new EcoTable(f, rowdef, EcoTable.tailCacheDenyUsage, EcoFSBufferSize, 0);
Table table = new Table(f, rowdef, Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
tables.put(this.current, table);
return table;
}

@ -40,7 +40,7 @@ import de.anomic.kelondro.util.FileUtils;
import de.anomic.kelondro.util.kelondroException;
import de.anomic.yacy.logging.Log;
public final class Stack extends FullRecords {
public final class Stack extends Records {
// define the Over-Head-Array
private static short thisOHBytes = 0; // our record definition does not need extra bytes
@ -99,8 +99,8 @@ public final class Stack extends FullRecords {
}
public class stackIterator implements Iterator<Row.Entry> {
RecordHandle nextHandle = null;
RecordHandle lastHandle = null;
Records.Handle nextHandle = null;
Records.Handle lastHandle = null;
boolean up;
public stackIterator(final boolean up) {
@ -115,8 +115,8 @@ public final class Stack extends FullRecords {
public Row.Entry next() {
lastHandle = nextHandle;
try {
nextHandle = new EcoNode(nextHandle).getOHHandle((up) ? right : left);
return row().newEntry(new EcoNode(lastHandle).getValueRow());
nextHandle = new Node(nextHandle).getOHHandle((up) ? right : left);
return row().newEntry(new Node(lastHandle).getValueRow());
} catch (final IOException e) {
e.printStackTrace();
throw new kelondroException(filename, "IO error at stackIterator.next(): " + e.getMessage());
@ -125,7 +125,7 @@ public final class Stack extends FullRecords {
public void remove() {
try {
unlinkNode(new EcoNode(lastHandle));
unlinkNode(new Node(lastHandle));
} catch (final IOException e) {
e.printStackTrace();
}
@ -141,7 +141,7 @@ public final class Stack extends FullRecords {
if (getHandle(toor) == null) {
if (getHandle(root) != null) throw new RuntimeException("push: internal organisation of root and toor");
// create node
final Node n = new EcoNode(row.bytes());
final Node n = new Node(row.bytes());
n.setOHHandle(left, null);
n.setOHHandle(right, null);
n.commit();
@ -151,10 +151,10 @@ public final class Stack extends FullRecords {
// thats it
} else {
// expand the list at the end
final Node n = new EcoNode(row.bytes());
final Node n = new Node(row.bytes());
n.setOHHandle(left, getHandle(toor));
n.setOHHandle(right, null);
final Node n1 = new EcoNode(getHandle(toor));
final Node n1 = new Node(getHandle(toor));
n1.setOHHandle(right, n.handle());
n.commit();
n1.commit();
@ -209,15 +209,15 @@ public final class Stack extends FullRecords {
private void unlinkNode(final Node n) throws IOException {
// join chaines over node
final RecordHandle l = n.getOHHandle(left);
final RecordHandle r = n.getOHHandle(right);
final Records.Handle l = n.getOHHandle(left);
final Records.Handle r = n.getOHHandle(right);
// look left
if (l == null) {
// reached the root on left side
setHandle(root, r);
} else {
// un-link the previous record
final Node k = new EcoNode(l);
final Node k = new Node(l);
k.setOHHandle(left, k.getOHHandle(left));
k.setOHHandle(right, r);
k.commit();
@ -228,7 +228,7 @@ public final class Stack extends FullRecords {
setHandle(toor, l);
} else {
// un-link the following record
final Node k = new EcoNode(r);
final Node k = new Node(r);
k.setOHHandle(left, l);
k.setOHHandle(right, k.getOHHandle(right));
k.commit();
@ -238,9 +238,9 @@ public final class Stack extends FullRecords {
private Node topNode() throws IOException {
// return node ontop of the stack
if (size() == 0) return null;
final RecordHandle h = getHandle(toor);
final Records.Handle h = getHandle(toor);
if (h == null) return null;
return new EcoNode(h);
return new Node(h);
}
private Node botNode() throws IOException {
@ -249,12 +249,12 @@ public final class Stack extends FullRecords {
Log.logInfo("Stack", "size() == 0");
return null;
}
final RecordHandle h = getHandle(root);
final Records.Handle h = getHandle(root);
if (h == null) {
Log.logInfo("Stack", "getHandle(root) == null in " + this.filename);
return null;
}
return new EcoNode(h);
return new Node(h);
}
public int imp(final File file, final String separator) throws IOException {

@ -64,10 +64,10 @@ import de.anomic.yacy.logging.Log;
* The content cache can also be deleted during run-time, if the available RAM gets too low.
*/
public class EcoTable implements ObjectIndex {
public class Table implements ObjectIndex {
// static tracker objects
private static TreeMap<String, EcoTable> tableTracker = new TreeMap<String, EcoTable>();
private static TreeMap<String, Table> tableTracker = new TreeMap<String, Table>();
public static final int tailCacheDenyUsage = 0;
public static final int tailCacheForceUsage = 1;
@ -84,7 +84,7 @@ public class EcoTable implements ObjectIndex {
private Row taildef;
private final int buffersize;
public EcoTable(final File tablefile, final Row rowdef, final int useTailCache, final int buffersize, final int initialSpace) {
public Table(final File tablefile, final Row rowdef, final int useTailCache, final int buffersize, final int initialSpace) {
this.tablefile = tablefile;
this.rowdef = rowdef;
this.buffersize = buffersize;
@ -122,21 +122,21 @@ public class EcoTable implements ObjectIndex {
((useTailCache == tailCacheForceUsage) ||
((useTailCache == tailCacheUsageAuto) && (MemoryControl.free() > neededRAM4table + 200 * 1024 * 1024)))) ?
new RowSet(taildef, records) : null;
Log.logInfo("ECOTABLE", "initialization of " + tablefile + ": available RAM: " + (MemoryControl.available() / 1024 / 1024) + "MB, allocating space for " + records + " entries");
Log.logInfo("TABLE", "initialization of " + tablefile + ": available RAM: " + (MemoryControl.available() / 1024 / 1024) + "MB, allocating space for " + records + " entries");
final long neededRAM4index = 2 * 1024 * 1024 + records * (rowdef.primaryKeyLength + 4) * 3 / 2;
if (!MemoryControl.request(neededRAM4index, false)) {
// despite calculations seemed to show that there is enough memory for the table AND the index
// there is now not enough memory left for the index. So delete the table again to free the memory
// for the index
Log.logSevere("ECOTABLE", tablefile + ": not enough RAM (" + (MemoryControl.available() / 1024 / 1024) + "MB) left for index, deleting allocated table space to enable index space allocation (needed: " + (neededRAM4index / 1024 / 1024) + "MB)");
Log.logSevere("TABLE", tablefile + ": not enough RAM (" + (MemoryControl.available() / 1024 / 1024) + "MB) left for index, deleting allocated table space to enable index space allocation (needed: " + (neededRAM4index / 1024 / 1024) + "MB)");
table = null; System.gc();
Log.logSevere("ECOTABLE", tablefile + ": RAM after releasing the table: " + (MemoryControl.available() / 1024 / 1024) + "MB");
Log.logSevere("TABLE", tablefile + ": RAM after releasing the table: " + (MemoryControl.available() / 1024 / 1024) + "MB");
}
index = new HandleMap(rowdef.primaryKeyLength, rowdef.objectOrder, 4, records, 100000);
Log.logInfo("ECOTABLE", tablefile + ": EcoTable " + tablefile.toString() + " has table copy " + ((table == null) ? "DISABLED" : "ENABLED"));
Log.logInfo("TABLE", tablefile + ": TABLE " + tablefile.toString() + " has table copy " + ((table == null) ? "DISABLED" : "ENABLED"));
// read all elements from the file into the copy table
Log.logInfo("ECOTABLE", "initializing RAM index for EcoTable " + tablefile.getName() + ", please wait.");
Log.logInfo("TABLE", "initializing RAM index for TABLE " + tablefile.getName() + ", please wait.");
int i = 0;
byte[] key;
if (table == null) {
@ -179,7 +179,7 @@ public class EcoTable implements ObjectIndex {
//assert index.size() + doubles.size() + fail == i;
//System.out.println(" -removed " + doubles.size() + " doubles- done.");
if (doubles.size() > 0) {
Log.logInfo("ECOTABLE", tablefile + ": WARNING - EcoTable " + tablefile + " has " + doubles.size() + " doubles");
Log.logInfo("TABLE", tablefile + ": WARNING - TABLE " + tablefile + " has " + doubles.size() + " doubles");
// from all the doubles take one, put it back to the index and remove the others from the file
// first put back one element each
final byte[] record = new byte[rowdef.objectsize];
@ -236,8 +236,8 @@ public class EcoTable implements ObjectIndex {
// returns a map for each file in the tracker;
// the map represents properties for each record objects,
// i.e. for cache memory allocation
final EcoTable theEcoTable = tableTracker.get(filename);
return theEcoTable.memoryStats();
final Table theTABLE = tableTracker.get(filename);
return theTABLE.memoryStats();
}
private final Map<String, String> memoryStats() {
@ -320,7 +320,7 @@ public class EcoTable implements ObjectIndex {
d.remove(s);
this.removeInFile(s.intValue());
if (System.currentTimeMillis() - lastlog > 30000) {
Log.logInfo("EcoTable", "removing " + d.size() + " entries in " + this.filename());
Log.logInfo("TABLE", "removing " + d.size() + " entries in " + this.filename());
lastlog = System.currentTimeMillis();
}
}
@ -693,7 +693,7 @@ public class EcoTable implements ObjectIndex {
}
public void remove() {
throw new UnsupportedOperationException("no remove in EcoTable");
throw new UnsupportedOperationException("no remove in TABLE");
}
}
@ -727,7 +727,7 @@ public class EcoTable implements ObjectIndex {
private static ObjectIndex testTable(final File f, final String testentities, final int testcase) throws IOException {
if (f.exists()) FileUtils.deletedelete(f);
final Row rowdef = new Row("byte[] a-4, byte[] b-4", NaturalOrder.naturalOrder);
final ObjectIndex tt = new EcoTable(f, rowdef, testcase, 100, 0);
final ObjectIndex tt = new Table(f, rowdef, testcase, 100, 0);
byte[] b;
final Row.Entry row = rowdef.newEntry();
for (int i = 0; i < testentities.length(); i++) {
@ -813,7 +813,7 @@ public class EcoTable implements ObjectIndex {
/*
kelondroRow row = new kelondroRow("byte[] key-4, byte[] x-5", kelondroNaturalOrder.naturalOrder, 0);
try {
kelondroEcoTable t = new kelondroEcoTable(f, row);
kelondroTABLE t = new kelondroTABLE(f, row);
kelondroRow.Entry entry = row.newEntry();
entry.setCol(0, "abcd".getBytes());
entry.setCol(1, "dummy".getBytes());

@ -37,7 +37,7 @@ import de.anomic.kelondro.order.Base64Order;
import de.anomic.kelondro.order.Bitfield;
import de.anomic.kelondro.order.CloneableIterator;
import de.anomic.kelondro.order.MicroDate;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.kelondro.text.IndexCell;
import de.anomic.kelondro.text.ReferenceContainer;
import de.anomic.kelondro.text.Segment;
@ -247,7 +247,7 @@ public class plasmaRankingCRProcess {
IndexCell<WordReference> newseq = null;
if (newdb) {
final File path = to_file.getParentFile(); // path to storage place
newacc = new EcoTable(new File(path, CRG_accname), CRG_accrow, EcoTable.tailCacheUsageAuto, 0, 0);
newacc = new Table(new File(path, CRG_accname), CRG_accrow, Table.tailCacheUsageAuto, 0, 0);
newseq = new IndexCell<WordReference>(
path,
Segment.wordReferenceFactory,

@ -52,7 +52,7 @@ import java.util.Iterator;
import de.anomic.kelondro.index.Row;
import de.anomic.kelondro.index.ObjectIndex;
import de.anomic.kelondro.order.Base64Order;
import de.anomic.kelondro.table.EcoTable;
import de.anomic.kelondro.table.Table;
import de.anomic.kelondro.util.DateFormatter;
import de.anomic.kelondro.util.FileUtils;
import de.anomic.kelondro.util.kelondroException;
@ -65,14 +65,14 @@ public class yacyNewsDB {
public yacyNewsDB(final File path) {
this.path = path;
this.news = new EcoTable(path, yacyNewsRecord.rowdef, EcoTable.tailCacheUsageAuto, 10, 0);
this.news = new Table(path, yacyNewsRecord.rowdef, Table.tailCacheUsageAuto, 10, 0);
//this.news = new kelondroCache(kelondroTree.open(path, true, preloadTime, yacyNewsRecord.rowdef));
}
private void resetDB() {
try {close();} catch (final Exception e) {}
if (path.exists()) FileUtils.deletedelete(path);
this.news = new EcoTable(path, yacyNewsRecord.rowdef, EcoTable.tailCacheUsageAuto, 10, 0);
this.news = new Table(path, yacyNewsRecord.rowdef, Table.tailCacheUsageAuto, 10, 0);
}
public void close() {

Loading…
Cancel
Save