fixed problem with initial cache load

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@3378 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 18 years ago
parent 2d8e472cfd
commit e72d253577

@ -103,6 +103,9 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
// create a node at position index with rowentry
Handle h = new Handle(index);
newNode(h, (rowentry == null) ? null : rowentry.bytes(), 0, true).commit(CP_NONE);
// attention! this newNode call expects the OH bytes to be passed within the bulkchunk
// field. Here, only the bare rowentry.bytes() payload is passed. This is valid because
// the OHbytes and OHhandles are zero.
}
public synchronized kelondroRow.Entry get(int index) throws IOException {

@ -656,7 +656,6 @@ public class kelondroRecords {
this.writeDouble = 0;
this.cacheDelete = 0;
this.cacheFlush = 0;
/*
// pre-load node cache
if ((preloadTime > 0) && (cacheSize > 0)) {
long stop = System.currentTimeMillis() + preloadTime;
@ -677,7 +676,6 @@ public class kelondroRecords {
}
}
*/
}
public File file() {
@ -761,6 +759,7 @@ public class kelondroRecords {
}
protected synchronized final Node newNode(Handle handle, byte[] bulkchunk, int offset, boolean write) throws IOException {
    // Factory for a record node at the given handle position.
    // bulkchunk must include the OH bytes and handles! (the overhead fields
    // precede the row payload inside the chunk, starting at 'offset')
    // NOTE(review): 'write' is forwarded to the Node constructor; presumably it
    // forces the record to be persisted immediately on allocation — confirm
    // against the Node(handle, bulkchunk, offset, write) implementation.
    return new Node(handle, bulkchunk, offset, write);
}
@ -853,12 +852,16 @@ public class kelondroRecords {
boolean changed;
if (handle.index >= USAGE.allCount()) {
assert write == true : "handle.index = " + handle.index + ", USAGE.allCount() = " + USAGE.allCount();
USAGE.allocate(handle.index, bulkchunk, offset, write);
changed = false; // this is independent from write
USAGE.allocate(handle.index, bulkchunk, offset + overhead, write);
if ((bulkchunk != null) && (overhead != 0)) {
// write also the OH bytes and handles
entryFile.write(seekpos(this.handle), this.headChunk, 0, overhead);
}
changed = false; // this is independent of write; we have already written the record, so it is considered unchanged
} else {
changed = write;
}
assert ((bulkchunk == null) || (bulkchunk.length >= offset + ROW.width(0))) : "bulkchunk.length = " + bulkchunk.length + ", offset = " + offset + ", ROW.width(0) = " + ROW.width(0);
assert ((bulkchunk == null) || (bulkchunk.length - offset >= recordsize)) : "bulkchunk.length = " + bulkchunk.length + ", offset = " + offset + ", recordsize = " + recordsize;
// create empty chunks
this.headChunk = new byte[headchunksize];
@ -866,8 +869,8 @@ public class kelondroRecords {
// write content to chunks
if (bulkchunk != null) {
System.arraycopy(bulkchunk, offset, this.headChunk, overhead, ROW.width(0));
System.arraycopy(bulkchunk, offset + ROW.width(0), this.tailChunk, 0, tailchunksize);
System.arraycopy(bulkchunk, offset, this.headChunk, 0, headchunksize);
System.arraycopy(bulkchunk, offset + headchunksize, this.tailChunk, 0, tailchunksize);
}
// mark chunks as changed
@ -1395,7 +1398,6 @@ public class kelondroRecords {
public contentNodeIterator(long maxInitTime) throws IOException, kelondroException {
// initialize markedDeleted set of deleted Handles
maxInitTime = -1;// for debugging only
markedDeleted = deletedHandles(maxInitTime);
fullyMarked = (maxInitTime < 0);

Loading…
Cancel
Save