such an entry cannot be instantiated without allocation of new byte[]; instead
it can re-use memory from other kelondroRow.Entry objects.
During bugfixing, other defects may also have been resolved; possibly the INCONSISTENCY
problem has been fixed as well. One possible cause is missing synchronization during bulk storage
when a R/W-path optimization is done. To test this case, the optimization is currently
switched off.
More memory enhancements can be done after this initial change to the allocation scheme.
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@3536 6c8d7289-2bf4-0310-a012-ef5d649a1542
// NOTE(review): diff-residue fragment — the enclosing method's header is not visible
// and inter-token whitespace has been stripped from the original lines.
// Log a severe inconsistency: after a put/merge the array's chunk count disagrees
// with the count recorded in the index row.
serverLog.logSevere("kelondroCollectionIndex","UPDATE (put) ERROR: array has different chunkcount than index after merge: index = "+(int)indexrow.getColLong(idx_col_chunkcount)+", collection.size() = "+collection.size());
index.put(indexrow);// persist the modified index row
}
@ -567,6 +572,7 @@ public class kelondroCollectionIndex {
// NOTE(review): diff-residue fragment — the enclosing method's header is not visible
// and inter-token whitespace has been stripped from the original lines.
// Log a severe inconsistency: after a merge the array's chunk count disagrees
// with the count recorded in the index row.
serverLog.logSevere("kelondroCollectionIndex","UPDATE (merge) ERROR: array has different chunkcount than index after merge: index = "+indexrowcount+", collection.size() = "+collectionsize);
index.put(indexrow);// persist the modified index row
}
}
@ -911,13 +923,13 @@ public class kelondroCollectionIndex {
@ -933,7 +945,7 @@ public class kelondroCollectionIndex {
// NOTE(review): diff-residue fragment — it contains BOTH the old and the new
// variant of the same statements side by side (the two kelondroRowSet lines and
// the two logFailure lines below), so it is not valid Java as it stands;
// inter-token whitespace has also been stripped.
// A bad array key means the stored RowCollection cannot be recovered; log it
// and return an empty row set instead.
serverLog.logSevere("kelondroCollectionIndex."+array.filename,"lost a RowCollection because of a bad arraykey");
returnnewkelondroRowSet(this.payloadrow,0);
}
// old variant: construct the row set from the raw column bytes of the array row
kelondroRowSetcollection=newkelondroRowSet(this.payloadrow,arrayrow.getColBytes(1));// FIXME: this does not yet work with different rowdef in case of several rowdef.objectsize()
// new variant: construct the row set directly from the array row — presumably to
// re-use its memory per the commit message (avoids allocating a new byte[]) — TODO confirm
kelondroRowSetcollection=newkelondroRowSet(this.payloadrow,arrayrow, 1);// FIXME: this does not yet work with different rowdef in case of several rowdef.objectsize()
// old variant of the inconsistency message:
array.logFailure("INCONSISTENCY in " +arrayFile(this.path,this.filenameStub,this.loadfactor,chunksize,clusteridx,serialnumber).toString()+": array has different chunkcount than index: index = "+chunkcount+", array = "+chunkcountInArray+"; the index has been auto-fixed");
// new variant: adds "(get)" so the log identifies which operation detected it:
array.logFailure("INCONSISTENCY (get) in " +arrayFile(this.path,this.filenameStub,this.loadfactor,chunksize,clusteridx,serialnumber).toString()+": array has different chunkcount than index: index = "+chunkcount+", array = "+chunkcountInArray+"; the index has been auto-fixed");
}
if(remove)array.remove(rownumber,false);// index entry is removed in the calling method
returncollection;
@ -1031,7 +1043,7 @@ public class kelondroCollectionIndex {
// NOTE(review): diff-residue fragment — OLD (pre-change) variant of a record
// allocation routine in kelondroRecords; the enclosing method's header is not
// visible and inter-token whitespace has been stripped. The NEW variant (wrapped
// in synchronized(entryFile)) follows later in this file.
// New-record path: reserve space at the end of the file.
entryFile.write(seekpos(index)+overhead,chunk,0,ROW.objectsize());// occupy space, otherwise the USAGE computation does not work
USAGE.USEDC++;
writeused(false);
returnindex;
}else{
// re-use a record from the free-list instead of growing the file
USAGE.USEDC++;
USAGE.FREEC--;
// take the head of the FREEH linked chain as the record to re-use
intindex;
if(USAGE.FREEH.index==NUL){
// FREEC claimed free records exist, but the chain head is NUL — data inconsistency
serverLog.logSevere("kelondroRecords/"+filename,"INTERNAL ERROR (DATA INCONSISTENCY): re-use of records failed, lost "+(USAGE.FREEC+1)+" records.");
// try to heal by discarding the free-list and appending at the end
USAGE.USEDC=USAGE.allCount()+1;
USAGE.FREEC=0;
index=USAGE.USEDC-1;
}else{
index=USAGE.FREEH.index;
//System.out.println("*DEBUG* ALLOCATED DELETED INDEX " + index);
// check that the chain head yields a seek position inside the file
longseekp=seekpos(USAGE.FREEH);
if(seekp>entryFile.length()){
// this is a severe inconsistency. try to heal..
serverLog.logSevere("kelondroRecords/"+filename,"new Handle: lost "+USAGE.FREEC+" marked nodes; seek position "+seekp+"/"+USAGE.FREEH.index+" out of file size "+entryFile.length()+"/"+((entryFile.length()-POS_NODES)/recordsize));
index=USAGE.allCount();// fall back to a place at the end of the file
USAGE.USEDC+=USAGE.FREEC;// to avoid that non-empty records at the end are overwritten
USAGE.FREEC=0;// discard all possible empty nodes
USAGE.FREEH.index=NUL;
}else{
// advance FREEH: read the link to the next element of the free chain
USAGE.FREEH.index=entryFile.readInt(seekp);
}
}
// persist the updated usage counters and free-list head
USAGE.writeused(false);
USAGE.writefree();
entryFile.write(seekpos(index)+overhead,chunk,0,ROW.objectsize());// overwrite the reserved space with the chunk
returnindex;
// NOTE(review): diff-residue fragment — NEW (post-change) variant of the record
// allocation routine; the only visible difference from the old variant earlier
// in this file is that the whole allocation is wrapped in synchronized(entryFile),
// presumably addressing the missing-synchronization cause named in the commit
// message — TODO confirm. Inter-token whitespace has been stripped; the enclosing
// method's header is not visible.
synchronized(entryFile){
if(USAGE.FREEC==0){
// no free records: generate a new entry at the end of the file
intindex=USAGE.allCount();
entryFile.write(seekpos(index)+overhead,chunk,0,ROW.objectsize());// occupy space, otherwise the USAGE computation does not work
USAGE.USEDC++;
writeused(false);
returnindex;
}else{
// re-use a record from the free-list instead of growing the file
USAGE.USEDC++;
USAGE.FREEC--;
// take the head of the FREEH linked chain as the record to re-use
intindex;
if(USAGE.FREEH.index==NUL){
// FREEC claimed free records exist, but the chain head is NUL — data inconsistency
serverLog.logSevere("kelondroRecords/"+filename,"INTERNAL ERROR (DATA INCONSISTENCY): re-use of records failed, lost "+(USAGE.FREEC+1)+" records.");
// try to heal by discarding the free-list and appending at the end
USAGE.USEDC=USAGE.allCount()+1;
USAGE.FREEC=0;
index=USAGE.USEDC-1;
}else{
index=USAGE.FREEH.index;
//System.out.println("*DEBUG* ALLOCATED DELETED INDEX " + index);
// check that the chain head yields a seek position inside the file
longseekp=seekpos(USAGE.FREEH);
if(seekp>entryFile.length()){
// this is a severe inconsistency. try to heal..
serverLog.logSevere("kelondroRecords/"+filename,"new Handle: lost "+USAGE.FREEC+" marked nodes; seek position "+seekp+"/"+USAGE.FREEH.index+" out of file size "+entryFile.length()+"/"+((entryFile.length()-POS_NODES)/recordsize));
index=USAGE.allCount();// fall back to a place at the end of the file
USAGE.USEDC+=USAGE.FREEC;// to avoid that non-empty records at the end are overwritten
USAGE.FREEC=0;// discard all possible empty nodes
USAGE.FREEH.index=NUL;
}else{
// advance FREEH: read the link to the next element of the free chain
USAGE.FREEH.index=entryFile.readInt(seekp);
}
}
// persist the updated usage counters and free-list head
USAGE.writeused(false);
USAGE.writefree();
entryFile.write(seekpos(index)+overhead,chunk,0,ROW.objectsize());// overwrite the reserved space with the chunk
returnindex;
}
}
}
}
@ -325,48 +331,49 @@ public class kelondroRecords {