Added an option to configure the start-up delay time for kelondro database files.

The start-up delay is used to pre-load the database node cache.

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@2276 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 19 years ago
parent ce9dd3e76d
commit 92f4cb4d73

@ -97,7 +97,7 @@ public final class IndexImport_p {
if (startImport) {
dbImporter importerThread = switchboard.dbImportManager.getNewImporter(importType);
if (importerThread != null) {
importerThread.init(new File(importPath),cacheSize);
importerThread.init(new File(importPath), cacheSize, 3000);
importerThread.startIt();
}
prop.put("LOCATION","");

@ -30,6 +30,7 @@ public class dbtest {
public final static int valuelength = 223; // sum of all data length as defined in plasmaURL
//public final static long buffer = 0;
public final static long buffer = 8192 * 1024; // 8 MB buffer
public final static long preload = 1000; // 1 second
public static byte[] dummyvalue2 = new byte[valuelength];
static {
// fill the dummy value
@ -171,23 +172,22 @@ public class dbtest {
if (dbe.equals("kelondroTree")) {
File tablefile = new File(tablename + ".kelondro.db");
if (tablefile.exists()) {
table = new kelondroTree(tablefile, buffer, kelondroTree.defaultObjectCachePercent);
table = new kelondroTree(tablefile, buffer, preload, kelondroTree.defaultObjectCachePercent);
} else {
table = new kelondroTree(tablefile, buffer, kelondroTree.defaultObjectCachePercent, testRow, true);
table = new kelondroTree(tablefile, buffer, preload, kelondroTree.defaultObjectCachePercent, testRow, true);
}
}
if (dbe.equals("kelondroSplittedTree")) {
File tablepath = new File(tablename).getParentFile();
tablename = new File(tablename).getName();
table = kelondroSplittedTree.open(tablepath, tablename, kelondroBase64Order.enhancedCoder,
buffer,
8,
testRow, 1, 80,
buffer, preload,
8, testRow, 1, 80,
true);
}
if (dbe.equals("kelondroFlexTable")) {
File tablepath = new File(tablename).getParentFile();
table = new kelondroFlexTable(tablepath, new File(tablename).getName(), buffer, testRow, true);
table = new kelondroFlexTable(tablepath, new File(tablename).getName(), buffer, preload, testRow, true);
}
if (dbe.equals("mysql")) {
table = new dbTable("mysql", testRow);

@ -80,15 +80,15 @@ public class blogBoard {
private kelondroMap datbase = null;
public blogBoard(File actpath, int bufferkb) {
public blogBoard(File actpath, int bufferkb, long preloadTime) {
new File(actpath.getParent()).mkdir();
if (datbase == null) {
if (actpath.exists()) try {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x40, '_'));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x40, preloadTime, '_'));
} catch (IOException e) {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, keyLength, recordSize, '_', true));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, preloadTime, keyLength, recordSize, '_', true));
} else {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, keyLength, recordSize, '_', true));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, preloadTime, keyLength, recordSize, '_', true));
}
}
}

@ -119,7 +119,7 @@ public class bookmarksDB {
return new Date();
}
public bookmarksDB(File bookmarksFile, File tagsFile, File datesFile, int bufferkb){
public bookmarksDB(File bookmarksFile, File tagsFile, File datesFile, int bufferkb, long preloadTime) {
//bookmarks
//check if database exists
tagCache=new HashMap();
@ -127,37 +127,37 @@ public class bookmarksDB {
if(bookmarksFile.exists()){
try {
//open it
this.bookmarksTable=new kelondroMap(new kelondroDyn(bookmarksFile, 1024*bufferkb, '_'));
this.bookmarksTable=new kelondroMap(new kelondroDyn(bookmarksFile, 1024*bufferkb, preloadTime, '_'));
} catch (IOException e) {
//database reset :-((
bookmarksFile.delete();
bookmarksFile.getParentFile().mkdirs();
//urlHash is 12 bytes long
this.bookmarksTable = new kelondroMap(new kelondroDyn(bookmarksFile, bufferkb * 1024, 12, 256, '_', true));
this.bookmarksTable = new kelondroMap(new kelondroDyn(bookmarksFile, bufferkb * 1024, preloadTime, 12, 256, '_', true));
}
}else{
//new database
bookmarksFile.getParentFile().mkdirs();
this.bookmarksTable = new kelondroMap(new kelondroDyn(bookmarksFile, bufferkb * 1024, 12, 256, '_', true));
this.bookmarksTable = new kelondroMap(new kelondroDyn(bookmarksFile, bufferkb * 1024, preloadTime, 12, 256, '_', true));
}
//tags
//check if database exists
if(tagsFile.exists()){
try {
//open it
this.tagsTable=new kelondroMap(new kelondroDyn(tagsFile, 1024*bufferkb, '_'));
this.tagsTable=new kelondroMap(new kelondroDyn(tagsFile, 1024*bufferkb, preloadTime, '_'));
} catch (IOException e) {
//reset database
tagsFile.delete();
tagsFile.getParentFile().mkdirs();
// max. 128 byte long tags
this.tagsTable = new kelondroMap(new kelondroDyn(tagsFile, bufferkb * 1024, 12, 256, '_', true));
this.tagsTable = new kelondroMap(new kelondroDyn(tagsFile, bufferkb * 1024, preloadTime, 12, 256, '_', true));
rebuildTags();
}
}else{
//new database
tagsFile.getParentFile().mkdirs();
this.tagsTable = new kelondroMap(new kelondroDyn(tagsFile, bufferkb * 1024, 12, 256, '_', true));
this.tagsTable = new kelondroMap(new kelondroDyn(tagsFile, bufferkb * 1024, preloadTime, 12, 256, '_', true));
rebuildTags();
}
// dates
@ -165,19 +165,19 @@ public class bookmarksDB {
if(datesFile.exists()){
try {
//open it
this.datesTable=new kelondroMap(new kelondroDyn(datesFile, 1024*bufferkb, '_'));
this.datesTable=new kelondroMap(new kelondroDyn(datesFile, 1024*bufferkb, preloadTime, '_'));
} catch (IOException e) {
//reset database
datesFile.delete();
datesFile.getParentFile().mkdirs();
//YYYY-MM-DDTHH:mm:ssZ = 20 byte. currently used: YYYY-MM-DD = 10 bytes
this.datesTable = new kelondroMap(new kelondroDyn(datesFile, bufferkb * 1024, 20, 256, '_', true));
this.datesTable = new kelondroMap(new kelondroDyn(datesFile, bufferkb * 1024, preloadTime, 20, 256, '_', true));
rebuildDates();
}
}else{
//new database
datesFile.getParentFile().mkdirs();
this.datesTable = new kelondroMap(new kelondroDyn(datesFile, bufferkb * 1024, 20, 256, '_', true));
this.datesTable = new kelondroMap(new kelondroDyn(datesFile, bufferkb * 1024, preloadTime, 20, 256, '_', true));
rebuildDates();
}
}

@ -67,16 +67,16 @@ public class messageBoard {
private kelondroMap database = null;
private int sn = 0;
public messageBoard(File path, int bufferkb) {
public messageBoard(File path, int bufferkb, long preloadTime) {
new File(path.getParent()).mkdir();
if (database == null) {
if (path.exists()) try {
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, '_'));
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, preloadTime, '_'));
} catch (IOException e) {
path.delete();
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, categoryLength + dateFormat.length() + 2, recordSize, '_', true));
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, preloadTime, categoryLength + dateFormat.length() + 2, recordSize, '_', true));
} else {
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, categoryLength + dateFormat.length() + 2, recordSize, '_', true));
database = new kelondroMap(new kelondroDyn(path, bufferkb * 0x400, preloadTime, categoryLength + dateFormat.length() + 2, recordSize, '_', true));
}
}
sn = 0;

@ -68,27 +68,29 @@ public final class userDB {
kelondroMap userTable;
private final File userTableFile;
private final int bufferkb;
private long preloadTime;
private HashMap ipUsers = new HashMap();
private HashMap cookieUsers = new HashMap();
public userDB(File userTableFile, int bufferkb) {
public userDB(File userTableFile, int bufferkb, long preloadTime) {
this.userTableFile = userTableFile;
this.bufferkb = bufferkb;
this.preloadTime = preloadTime;
if (userTableFile.exists()) {
try {
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, '_'));
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, preloadTime, '_'));
} catch (kelondroException e) {
userTableFile.delete();
userTableFile.getParentFile().mkdirs();
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, 128, 256, '_', true));
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, preloadTime, 128, 256, '_', true));
} catch (IOException e) {
userTableFile.delete();
userTableFile.getParentFile().mkdirs();
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, 128, 256, '_', true));
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, preloadTime, 128, 256, '_', true));
}
} else {
userTableFile.getParentFile().mkdirs();
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, 128, 256, '_', true));
this.userTable = new kelondroMap(new kelondroDyn(userTableFile, bufferkb * 1024, preloadTime, 128, 256, '_', true));
}
}
@ -107,7 +109,7 @@ public final class userDB {
} catch (IOException e) {}
if (!(userTableFile.delete())) throw new RuntimeException("cannot delete user database");
userTableFile.getParentFile().mkdirs();
userTable = new kelondroMap(new kelondroDyn(userTableFile, this.bufferkb, 256, 512, '_', true));
userTable = new kelondroMap(new kelondroDyn(userTableFile, this.bufferkb, preloadTime, 256, 512, '_', true));
}
public void close() {

@ -69,25 +69,25 @@ public class wikiBoard {
private kelondroMap bkpbase = null;
private static HashMap authors = new HashMap();
public wikiBoard(File actpath, File bkppath, int bufferkb) {
public wikiBoard(File actpath, File bkppath, int bufferkb, long preloadTime) {
new File(actpath.getParent()).mkdir();
if (datbase == null) {
if (actpath.exists()) try {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x40, '_'));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x40, preloadTime, '_'));
} catch (IOException e) {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, keyLength, recordSize, '_', true));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, preloadTime, keyLength, recordSize, '_', true));
} else {
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, keyLength, recordSize, '_', true));
datbase = new kelondroMap(new kelondroDyn(actpath, bufferkb / 2 * 0x400, preloadTime, keyLength, recordSize, '_', true));
}
}
new File(bkppath.getParent()).mkdir();
if (bkpbase == null) {
if (bkppath.exists()) try {
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, '_'));
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, preloadTime, '_'));
} catch (IOException e) {
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, keyLength + dateFormat.length(), recordSize, '_', true));
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, preloadTime, keyLength + dateFormat.length(), recordSize, '_', true));
} else {
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, keyLength + dateFormat.length(), recordSize, '_', true));
bkpbase = new kelondroMap(new kelondroDyn(bkppath, bufferkb / 2 * 0x400, preloadTime, keyLength + dateFormat.length(), recordSize, '_', true));
}
}
}

@ -12,12 +12,12 @@ public class indexCollectionRI extends indexAbstractRI implements indexRI {
kelondroCollectionIndex collectionIndex;
public indexCollectionRI(File path, String filenameStub, long buffersize) throws IOException {
public indexCollectionRI(File path, String filenameStub, long buffersize, long preloadTime) throws IOException {
kelondroRow rowdef = new kelondroRow(new int[]{});
collectionIndex = new kelondroCollectionIndex(
path, filenameStub, 9 /*keyLength*/,
kelondroNaturalOrder.naturalOrder, buffersize,
kelondroNaturalOrder.naturalOrder, buffersize, preloadTime,
4 /*loadfactor*/, rowdef, 8 /*partitions*/);
}

@ -57,7 +57,8 @@ public class kelondroCollectionIndex {
return (int) (time / day) - 10957;
}
public kelondroCollectionIndex(File path, String filenameStub, int keyLength, kelondroOrder indexOrder, long buffersize,
public kelondroCollectionIndex(File path, String filenameStub, int keyLength, kelondroOrder indexOrder,
long buffersize, long preloadTime,
int loadfactor, kelondroRow rowdef, int partitions) throws IOException {
this.path = path;
this.filenameStub = filenameStub;
@ -73,7 +74,7 @@ public class kelondroCollectionIndex {
columns[2] = 4; // chunkcount (number of chunks in this collection)
columns[3] = 4; // index (position in index file)
columns[4] = 2; // update time in days since 1.1.2000
index = new kelondroSplittedTree(path, filenameStub, indexOrder, buffersize, 8, new kelondroRow(columns), 1, 80, true);
index = new kelondroSplittedTree(path, filenameStub, indexOrder, buffersize, preloadTime, 8, new kelondroRow(columns), 1, 80, true);
// create array files
this.array = new kelondroFixedWidthArray[partitions];

@ -68,15 +68,15 @@ public class kelondroDyn extends kelondroTree {
private char fillChar;
private kelondroObjectBuffer buffer;
public kelondroDyn(File file, long buffersize /*bytes*/, int key, int nodesize, char fillChar, boolean exitOnFail) {
this(file, buffersize, key, nodesize, fillChar, new kelondroNaturalOrder(true), exitOnFail);
public kelondroDyn(File file, long buffersize /*bytes*/, long preloadTime, int key, int nodesize, char fillChar, boolean exitOnFail) {
this(file, buffersize, preloadTime, key, nodesize, fillChar, new kelondroNaturalOrder(true), exitOnFail);
}
public kelondroDyn(File file, long buffersize /* bytes */, int key,
public kelondroDyn(File file, long buffersize /* bytes */, long preloadTime, int key,
int nodesize, char fillChar, kelondroOrder objectOrder,
boolean exitOnFail) {
// creates a new dynamic tree
super(file, buffersize, kelondroTree.defaultObjectCachePercent, new kelondroRow(new int[] { key + counterlen, nodesize }), objectOrder, 1, 8, exitOnFail);
super(file, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(new int[] { key + counterlen, nodesize }), objectOrder, 1, 8, exitOnFail);
this.keylen = row().width(0) - counterlen;
this.reclen = row().width(1);
this.fillChar = fillChar;
@ -85,9 +85,9 @@ public class kelondroDyn extends kelondroTree {
buffer = new kelondroObjectBuffer(file.toString());
}
public kelondroDyn(File file, long buffersize, char fillChar) throws IOException {
public kelondroDyn(File file, long buffersize, long preloadTime, char fillChar) throws IOException {
// this opens a file with an existing dynamic tree
super(file, buffersize, kelondroTree.defaultObjectCachePercent);
super(file, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent);
this.keylen = row().width(0) - counterlen;
this.reclen = row().width(1);
this.fillChar = fillChar;
@ -434,8 +434,7 @@ public class kelondroDyn extends kelondroTree {
} else if (args.length == 1) {
// open a db and list keys
try {
kelondroDyn kd = new kelondroDyn(new File(args[0]), 0x100000,
'_');
kelondroDyn kd = new kelondroDyn(new File(args[0]), 0x100000, 0, '_');
System.out.println(kd.size() + " elements in DB");
Iterator i = kd.dynKeys(true, false);
while (i.hasNext())
@ -453,9 +452,9 @@ public class kelondroDyn extends kelondroTree {
kelondroDyn kd;
try {
if (db.exists())
kd = new kelondroDyn(db, 0x100000, '_');
kd = new kelondroDyn(db, 0x100000, 0, '_');
else
kd = new kelondroDyn(db, 0x100000, 80, 200, '_', true);
kd = new kelondroDyn(db, 0x100000, 0, 80, 200, '_', true);
if (writeFile)
kd.readFile(key, f);
else
@ -479,7 +478,7 @@ public class kelondroDyn extends kelondroTree {
int steps = 0;
while (true) {
if (testFile.exists()) testFile.delete();
tt = new kelondroDyn(testFile, 0, 4 ,100, '_', true);
tt = new kelondroDyn(testFile, 0, 0, 4 ,100, '_', true);
steps = ((int) System.currentTimeMillis() % 7) * (((int) System.currentTimeMillis() + 17) % 11);
t = s;
d = "";

@ -64,33 +64,38 @@ public class kelondroDynTree {
//private long maxageCache = 60000, cycletimeCache = 10000;
private long maxageBuffer = 60000, cycletimeBuffer = 10000;
private long buffersize = 0;
private long preloadTime = 0;
// data structures for the cache and buffer
private Hashtable buffer, cache;
private long cycleBuffer;
public kelondroDynTree(File file, long buffersize, int keylength, int nodesize, kelondroRow rowdef, char fillChar, boolean exitOnFail) {
public kelondroDynTree(File file, long buffersize, long preloadTime, int keylength, int nodesize, kelondroRow rowdef, char fillChar, boolean exitOnFail) {
// creates a new DynTree
this.file = file;
this.buffersize = buffersize;
this.preloadTime = preloadTime;
this.rowdef = rowdef;
this.buffer = new Hashtable();
this.cache = new Hashtable();
//this.cycleCache = Long.MIN_VALUE;
this.cycleBuffer = Long.MIN_VALUE;
if (file.exists()) file.delete();
this.table = new kelondroDyn(file, buffersize, keylength, nodesize, fillChar, exitOnFail);
this.table = new kelondroDyn(file, buffersize, preloadTime, keylength, nodesize, fillChar, exitOnFail);
this.treeRAHandles = new Hashtable();
}
public kelondroDynTree(File file, long buffersize, char fillChar) throws IOException {
public kelondroDynTree(File file, long buffersize, long preloadTime, char fillChar) throws IOException {
// opens an existing DynTree
this.file = file;
this.buffersize = buffersize;
this.preloadTime = preloadTime;
this.buffer = new Hashtable();
this.cache = new Hashtable();
//this.cycleCache = Long.MIN_VALUE;
this.cycleBuffer = Long.MIN_VALUE;
if (!(file.exists())) throw new IOException("DynTree " + file.toString() + " does not exist");
this.table = new kelondroDyn(file, buffersize, fillChar);
this.table = new kelondroDyn(file, buffersize, preloadTime, fillChar);
// read one element to measure the size of columns
if (table.size() == 0) throw new IOException("DynTree " + file.toString() + " is empty. Should not.");
this.treeRAHandles = new Hashtable();
@ -127,7 +132,7 @@ public class kelondroDynTree {
kelondroRA ra = table.getRA(key); // works always, even with no-existing entry
treeRAHandles.put(key, ra);
try {
return new kelondroTree(ra, buffersize, kelondroTree.defaultObjectCachePercent, rowdef, false);
return new kelondroTree(ra, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent, rowdef, false);
} catch (RuntimeException e) {
throw new IOException(e.getMessage());
}
@ -137,7 +142,7 @@ public class kelondroDynTree {
if (table.existsDyn(key)) {
kelondroRA ra = table.getRA(key);
treeRAHandles.put(key, ra);
return new kelondroTree(ra, buffersize, kelondroTree.defaultObjectCachePercent);
return new kelondroTree(ra, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent);
} else {
return null;
}
@ -319,10 +324,10 @@ public class kelondroDynTree {
System.out.println("start");
File file = new File("D:\\bin\\testDyn.db");
if (file.exists()) {
kelondroDynTree dt = new kelondroDynTree(file, 0x100000L, '_');
kelondroDynTree dt = new kelondroDynTree(file, 0x100000L, 0, '_');
System.out.println("opened: table keylength=" + dt.table.row().width(0) + ", sectorsize=" + dt.table.row().width(1) + ", " + dt.table.size() + " entries.");
} else {
kelondroDynTree dt = new kelondroDynTree(file, 0x100000L, 16, 512, new kelondroRow(new int[] {10,20,30}), '_', true);
kelondroDynTree dt = new kelondroDynTree(file, 0x100000L, 0, 16, 512, new kelondroRow(new int[] {10,20,30}), '_', true);
String name;
kelondroTree t;
kelondroRow.Entry line;

@ -57,7 +57,7 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
public kelondroFixedWidthArray(File file, kelondroRow rowdef, int intprops, boolean exitOnFail) {
// this creates a new array
super(file, 0, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
super(file, 0, 0, thisOHBytes, thisOHHandles, rowdef, intprops, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
for (int i = 0; i < intprops; i++) try {
setHandle(i, new Handle(0));
} catch (IOException e) {
@ -69,7 +69,7 @@ public class kelondroFixedWidthArray extends kelondroRecords implements kelondro
public kelondroFixedWidthArray(File file) throws IOException{
// this opens a file with an existing array
super(file, 0);
super(file, 0, 0);
}
public synchronized kelondroRow.Entry set(int index, kelondroRow.Entry rowentry) throws IOException {

@ -33,7 +33,7 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
private kelondroBytesIntMap index;
public kelondroFlexTable(File path, String tablename, long buffersize, kelondroRow rowdef, boolean exitOnFail) throws IOException {
public kelondroFlexTable(File path, String tablename, long buffersize, long preloadTime, kelondroRow rowdef, boolean exitOnFail) throws IOException {
super(path, tablename, rowdef, exitOnFail);
File newpath = new File(path, tablename + ".table");
File indexfile = new File(newpath, "col.000.index");
@ -45,11 +45,11 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
if (indexfile.exists()) {
// use existing index file
System.out.println("*** Using File index " + indexfile);
ki = new kelondroTree(indexfile, buffersize, 10);
ki = new kelondroTree(indexfile, buffersize, preloadTime, 10);
} else if (size() > 100000) {
// generate new index file
System.out.print("*** Generating File index for " + size() + " entries from " + indexfile);
ki = initializeTreeIndex(indexfile, buffersize);
ki = initializeTreeIndex(indexfile, buffersize, preloadTime);
System.out.println(" -done-");
System.out.println(ki.size()
@ -98,8 +98,8 @@ public class kelondroFlexTable extends kelondroFlexWidthArray implements kelondr
}
private kelondroIndex initializeTreeIndex(File indexfile, long buffersize) throws IOException {
kelondroTree index = new kelondroTree(indexfile, buffersize, 10, rowdef.width(0), 4, true);
private kelondroIndex initializeTreeIndex(File indexfile, long buffersize, long preloadTime) throws IOException {
kelondroTree index = new kelondroTree(indexfile, buffersize, preloadTime, 10, rowdef.width(0), 4, true);
Iterator content = super.col[0].contentNodes();
kelondroRecords.Node node;
kelondroRow.Entry indexentry;

@ -61,45 +61,52 @@ public class kelondroMapTable {
if (!(tablesPath.exists())) tablesPath.mkdirs();
}
public void declareMaps(String tablename, int keysize, int nodesize, char fillChar, boolean exitOnFail) {
public void declareMaps(
String tablename, int keysize, int nodesize,
char fillChar, boolean exitOnFail) {
declareMaps(tablename, keysize, nodesize, null, null, fillChar, exitOnFail);
}
public void declareMaps(String tablename, int keysize, int nodesize, String[] sortfields, String[] accfields, char fillChar, boolean exitOnFail) {
declareMaps(tablename, keysize, nodesize, sortfields, accfields, fillChar, 0x800, exitOnFail);
public void declareMaps(
String tablename, int keysize, int nodesize,
String[] sortfields, String[] accfields, char fillChar, boolean exitOnFail) {
declareMaps(tablename, keysize, nodesize, sortfields, accfields, fillChar, 0x800, 0, exitOnFail);
}
public void declareMaps(String tablename, int keysize, int nodesize, String[] sortfields, String[] accfields, char fillChar, long buffersize /*bytes*/, boolean exitOnFail) {
public void declareMaps(
String tablename, int keysize, int nodesize,
String[] sortfields, String[] accfields, char fillChar,
long buffersize /*bytes*/, long preloadTime, boolean exitOnFail) {
if (mTables.containsKey(tablename)) throw new RuntimeException("kelondroTables.declareMap: table '" + tablename + "' declared twice.");
if (tTables.containsKey(tablename)) throw new RuntimeException("kelondroTables.declareMap: table '" + tablename + "' declared already in other context.");
File tablefile = new File(tablesPath, "table." + tablename + ".mdb");
kelondroDyn dyn;
if (tablefile.exists()) try {
dyn = new kelondroDyn(tablefile, buffersize, fillChar);
dyn = new kelondroDyn(tablefile, buffersize, preloadTime, fillChar);
} catch (IOException e) {
tablefile.getParentFile().mkdirs();
dyn = new kelondroDyn(tablefile, buffersize, keysize, nodesize, fillChar, exitOnFail);
dyn = new kelondroDyn(tablefile, buffersize, preloadTime, keysize, nodesize, fillChar, exitOnFail);
} else {
tablefile.getParentFile().mkdirs();
dyn = new kelondroDyn(tablefile, buffersize, keysize, nodesize, fillChar, exitOnFail);
dyn = new kelondroDyn(tablefile, buffersize, preloadTime, keysize, nodesize, fillChar, exitOnFail);
}
kelondroMap map = new kelondroMap(dyn, sortfields, accfields);
mTables.put(tablename, map);
}
public void declareTree(String tablename, kelondroRow rowdef, long buffersize /*bytes*/, boolean exitOnFail) {
public void declareTree(String tablename, kelondroRow rowdef, long buffersize /*bytes*/, long preloadTime, boolean exitOnFail) {
if (mTables.containsKey(tablename)) throw new RuntimeException("kelondroTables.declareTree: table '" + tablename + "' declared already in other context.");
if (tTables.containsKey(tablename)) throw new RuntimeException("kelondroTables.declareTree: table '" + tablename + "' declared twice.");
File tablefile = new File(tablesPath, "table." + tablename + ".tdb");
kelondroTree Tree;
if (tablefile.exists()) try {
Tree = new kelondroTree(tablefile, buffersize, kelondroTree.defaultObjectCachePercent);
Tree = new kelondroTree(tablefile, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
tablefile.getParentFile().mkdirs();
Tree = new kelondroTree(tablefile, buffersize, kelondroTree.defaultObjectCachePercent, rowdef, exitOnFail);
Tree = new kelondroTree(tablefile, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent, rowdef, exitOnFail);
} else {
tablefile.getParentFile().mkdirs();
Tree = new kelondroTree(tablefile, buffersize, kelondroTree.defaultObjectCachePercent, rowdef, exitOnFail);
Tree = new kelondroTree(tablefile, buffersize, preloadTime, kelondroTree.defaultObjectCachePercent, rowdef, exitOnFail);
}
tTables.put(tablename, Tree);
}

@ -82,7 +82,6 @@ public class kelondroRecords {
private static final int NUL = Integer.MIN_VALUE; // the meta value for the kelondroRecords' NUL abstraction
private static final long memBlock = 500000; // do not fill cache further if the amount of available memory is less that this
public final static boolean useWriteBuffer = false;
public final static long preloadCacheTime = 500; // time that can be wasted to initialize the node cache
// memory calculation
private static final int element_in_cache = 4; // for kelondroCollectionObjectMap: 4; for HashMap: 52
@ -186,7 +185,7 @@ public class kelondroRecords {
}
}
public kelondroRecords(File file, long buffersize /* bytes */,
public kelondroRecords(File file, long buffersize /* bytes */, long preloadTime,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth,
boolean exitOnFail) {
@ -209,10 +208,10 @@ public class kelondroRecords {
if (exitOnFail)
System.exit(-1);
}
initCache(buffersize / 10 * 9);
initCache(buffersize / 10 * 9, preloadTime);
}
public kelondroRecords(kelondroRA ra, long buffersize /* bytes */,
public kelondroRecords(kelondroRA ra, long buffersize /* bytes */, long preloadTime,
short ohbytec, short ohhandlec,
kelondroRow rowdef, int FHandles, int txtProps, int txtPropWidth,
boolean exitOnFail) {
@ -223,7 +222,7 @@ public class kelondroRecords {
logFailure("cannot create / " + e.getMessage());
if (exitOnFail) System.exit(-1);
}
initCache(buffersize / 10 * 9);
initCache(buffersize / 10 * 9, preloadTime);
}
private void initNewFile(kelondroRA ra, short ohbytec, short ohhandlec,
@ -344,7 +343,7 @@ public class kelondroRecords {
this.USAGE.write();
}
public kelondroRecords(File file, long buffersize) throws IOException{
public kelondroRecords(File file, long buffersize, long preloadTime) throws IOException{
// opens an existing tree
assert (file.exists()): "file " + file.getAbsoluteFile().toString() + " does not exist";
this.filename = file.getCanonicalPath();
@ -353,13 +352,13 @@ public class kelondroRecords {
//kelondroRA raf = new kelondroCachedRA(new kelondroFileRA(this.filename), 5000000, 1000);
//kelondroRA raf = new kelondroNIOFileRA(this.filename, (file.length() < 4000000), 10000);
initExistingFile(raf, buffersize / 10);
initCache(buffersize / 10 * 9);
initCache(buffersize / 10 * 9, preloadTime);
}
public kelondroRecords(kelondroRA ra, long buffersize) throws IOException{
public kelondroRecords(kelondroRA ra, long buffersize, long preloadTime) throws IOException{
this.filename = null;
initExistingFile(ra, buffersize / 10);
initCache(buffersize / 10 * 9);
initCache(buffersize / 10 * 9, preloadTime);
}
private void initExistingFile(kelondroRA ra, long writeBufferSize) throws IOException {
@ -410,7 +409,7 @@ public class kelondroRecords {
this.tailchunksize = this.recordsize - this.headchunksize;
}
private void initCache(long buffersize) {
private void initCache(long buffersize, long preloadTime) {
if (buffersize <= 0) {
this.cacheSize = 0;
this.cacheHeaders = null;
@ -427,8 +426,8 @@ public class kelondroRecords {
this.cacheFlush = 0;
// pre-load node cache
if ((preloadCacheTime > 0) && (cacheSize > 0)) {
long stop = System.currentTimeMillis() + preloadCacheTime;
if ((preloadTime > 0) && (cacheSize > 0)) {
long stop = System.currentTimeMillis() + preloadTime;
Iterator i = contentNodes();
Node n;
int count = 0;

@ -73,10 +73,8 @@ public class kelondroSplittedTree implements kelondroIndex {
}
public kelondroSplittedTree(File pathToFiles, String filenameStub, kelondroOrder objectOrder,
long buffersize,
int forkfactor,
kelondroRow rowdef,
int txtProps, int txtPropsWidth,
long buffersize, long preloadTime,
int forkfactor, kelondroRow rowdef, int txtProps, int txtPropsWidth,
boolean exitOnFail) {
ktfs = new kelondroTree[forkfactor];
File f;
@ -84,15 +82,15 @@ public class kelondroSplittedTree implements kelondroIndex {
f = dbFile(pathToFiles, filenameStub, forkfactor, rowdef.columns(), i);
if (f.exists()) {
try {
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, kelondroTree.defaultObjectCachePercent);
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, preloadTime / forkfactor, kelondroTree.defaultObjectCachePercent);
this.order = ktfs[i].order();
} catch (IOException e) {
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, kelondroTree.defaultObjectCachePercent,
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, preloadTime / forkfactor, kelondroTree.defaultObjectCachePercent,
rowdef, objectOrder, txtProps, txtPropsWidth, exitOnFail);
this.order = objectOrder;
}
} else {
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, kelondroTree.defaultObjectCachePercent,
ktfs[i] = new kelondroTree(f, buffersize/forkfactor, preloadTime / forkfactor, kelondroTree.defaultObjectCachePercent,
rowdef, objectOrder, txtProps, txtPropsWidth, exitOnFail);
this.order = objectOrder;
}
@ -101,29 +99,28 @@ public class kelondroSplittedTree implements kelondroIndex {
}
public kelondroSplittedTree(File pathToFiles, String filenameStub, kelondroOrder objectOrder,
long buffersize, int forkfactor, int columns) throws IOException {
long buffersize, long preloadTime, int forkfactor, int columns) throws IOException {
ktfs = new kelondroTree[forkfactor];
for (int i = 0; i < forkfactor; i++) {
ktfs[i] = new kelondroTree(dbFile(pathToFiles, filenameStub, forkfactor, columns, i), buffersize/forkfactor, kelondroTree.defaultObjectCachePercent);
ktfs[i] = new kelondroTree(dbFile(pathToFiles, filenameStub, forkfactor, columns, i),
buffersize/forkfactor, preloadTime / forkfactor, kelondroTree.defaultObjectCachePercent);
}
ff = forkfactor;
this.order = objectOrder;
}
public static kelondroSplittedTree open(File pathToFiles, String filenameStub, kelondroOrder objectOrder,
long buffersize,
int forkfactor,
kelondroRow rowdef, int txtProps, int txtPropsWidth,
long buffersize, long preloadTime,
int forkfactor, kelondroRow rowdef, int txtProps, int txtPropsWidth,
boolean exitOnFail) throws IOException {
// generated a new splittet tree if it not exists or
// opens an existing one
if (existsAll(pathToFiles, filenameStub, forkfactor, rowdef.columns())) {
return new kelondroSplittedTree(pathToFiles, filenameStub, objectOrder, buffersize, forkfactor, rowdef.columns());
return new kelondroSplittedTree(pathToFiles, filenameStub, objectOrder, buffersize, preloadTime, forkfactor, rowdef.columns());
} else {
return new kelondroSplittedTree(pathToFiles, filenameStub, objectOrder,
buffersize,
forkfactor,
rowdef, txtProps, txtPropsWidth,
buffersize, preloadTime,
forkfactor, rowdef, txtProps, txtPropsWidth,
exitOnFail);
}
}

@ -67,13 +67,13 @@ public final class kelondroStack extends kelondroRecords {
private static int root = 0; // pointer for FHandles-array: pointer to root node
private static int toor = 1; // pointer for FHandles-array: pointer to root node
public kelondroStack(File file, long buffersize, int key, int value, boolean exitOnFail) {
this(file, buffersize, new kelondroRow(new int[] { key, value }), exitOnFail);
public kelondroStack(File file, int key, int value, boolean exitOnFail) {
this(file, new kelondroRow(new int[] { key, value }), exitOnFail);
}
public kelondroStack(File file, long buffersize, kelondroRow rowdef, boolean exitOnFail) {
public kelondroStack(File file, kelondroRow rowdef, boolean exitOnFail) {
// this creates a new stack
super(file, buffersize, thisOHBytes, thisOHHandles, rowdef, thisFHandles, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
super(file, 0, 0, thisOHBytes, thisOHHandles, rowdef, thisFHandles, rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
try {
setHandle(root, null); // define the root value
setHandle(toor, null); // define the toor value
@ -84,9 +84,9 @@ public final class kelondroStack extends kelondroRecords {
}
}
public kelondroStack(File file, long buffersize) throws IOException{
public kelondroStack(File file) throws IOException{
// this opens a file with an existing stack
super(file, buffersize);
super(file, 0, 0);
if ((getHandle(root) == null) && (getHandle(toor) == null)) clear();
}
@ -99,7 +99,6 @@ public final class kelondroStack extends kelondroRecords {
public static kelondroStack reset(kelondroStack stack) {
// memorize settings to this file
File f = new File(stack.filename);
long bz = stack.cacheNodeStatus()[0] * stack.cacheNodeChunkSize();
kelondroRow row = stack.row();
// close and delete the file
@ -107,7 +106,7 @@ public final class kelondroStack extends kelondroRecords {
if (f.exists()) f.delete();
// re-open a database with same settings as before
return new kelondroStack(f, bz, row, true);
return new kelondroStack(f, row, true);
}
public class Counter implements Iterator {
@ -367,12 +366,12 @@ public final class kelondroStack extends kelondroRecords {
System.err.println("( create, push, view, (g)pop, imp, shell)");
System.exit(0);
} else if (args.length == 2) {
kelondroStack fm = new kelondroStack(new File(args[1]), 0x100000);
kelondroStack fm = new kelondroStack(new File(args[1]));
if (args[0].equals("-v")) {
fm.print();
ret = null;
} else if (args[0].equals("-g")) {
fm = new kelondroStack(new File(args[1]), 0x100000);
fm = new kelondroStack(new File(args[1]));
kelondroRow.Entry ret2 = fm.pop();
ret = ((ret2 == null) ? null : ret2.getColBytes(1));
fm.close();
@ -380,7 +379,7 @@ public final class kelondroStack extends kelondroRecords {
fm.close();
} else if (args.length == 3) {
if (args[0].equals("-i")) {
kelondroStack fm = new kelondroStack(new File(args[2]), 0x100000);
kelondroStack fm = new kelondroStack(new File(args[2]));
int i = fm.imp(new File(args[1]),";");
fm.close();
ret = (i + " records imported").getBytes();
@ -404,7 +403,7 @@ public final class kelondroStack extends kelondroRecords {
if (f != null) try {f.close();}catch(Exception e) {}
}
} else if (args[0].equals("-g")) {
kelondroStack fm = new kelondroStack(new File(args[2]), 0x100000);
kelondroStack fm = new kelondroStack(new File(args[2]));
kelondroRow.Entry ret2 = fm.pop(Integer.parseInt(args[1]));
ret = ((ret2 == null) ? null : ret2.getColBytes(1));
fm.close();
@ -415,10 +414,10 @@ public final class kelondroStack extends kelondroRecords {
File f = new File(args[3]);
if (f.exists()) f.delete();
kelondroRow lens = new kelondroRow(new int[]{Integer.parseInt(args[1]), Integer.parseInt(args[2])});
kelondroStack fm = new kelondroStack(f, 0x100000, lens, true);
kelondroStack fm = new kelondroStack(f, lens, true);
fm.close();
} else if (args[0].equals("-p")) {
kelondroStack fm = new kelondroStack(new File(args[3]), 0x100000);
kelondroStack fm = new kelondroStack(new File(args[3]));
fm.push(fm.row().newEntry(new byte[][] {args[1].getBytes(), args[2].getBytes()}));
fm.close();
}

@ -92,19 +92,19 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
private kelondroObjectCache objectCache;
public kelondroTree(File file, long buffersize, int objectCachePercent, int key, int value, boolean exitOnFail) {
this(file, buffersize, objectCachePercent, new kelondroRow(new int[] { key, value }), new kelondroNaturalOrder(true), 1, 8, exitOnFail);
public kelondroTree(File file, long buffersize, long preloadTime, int objectCachePercent, int key, int value, boolean exitOnFail) {
this(file, buffersize, preloadTime, objectCachePercent, new kelondroRow(new int[] { key, value }), new kelondroNaturalOrder(true), 1, 8, exitOnFail);
}
public kelondroTree(File file, long buffersize, int objectCachePercent, kelondroRow rowdef, boolean exitOnFail) {
public kelondroTree(File file, long buffersize, long preloadTime, int objectCachePercent, kelondroRow rowdef, boolean exitOnFail) {
// this creates a new tree file
this(file, buffersize, objectCachePercent, rowdef, new kelondroNaturalOrder(true), rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
this(file, buffersize, preloadTime, objectCachePercent, rowdef, new kelondroNaturalOrder(true), rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
}
public kelondroTree(File file, long buffersize, int objectCachePercent, kelondroRow rowdef, kelondroOrder objectOrder, int txtProps, int txtPropsWidth, boolean exitOnFail) {
public kelondroTree(File file, long buffersize, long preloadTime, int objectCachePercent, kelondroRow rowdef, kelondroOrder objectOrder, int txtProps, int txtPropsWidth, boolean exitOnFail) {
// this creates a new tree file
super(file,
(100 - objectCachePercent) * buffersize / 100,
(100 - objectCachePercent) * buffersize / 100, preloadTime,
thisOHBytes, thisOHHandles, rowdef,
thisFHandles, txtProps, txtPropsWidth, exitOnFail);
try {
@ -120,15 +120,15 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
initObjectCache(buffersize, objectCachePercent);
}
public kelondroTree(kelondroRA ra, long buffersize, int objectCachePercent, kelondroRow rowdef, boolean exitOnFail) {
public kelondroTree(kelondroRA ra, long buffersize, long preloadTime, int objectCachePercent, kelondroRow rowdef, boolean exitOnFail) {
// this creates a new tree within a kelondroRA
this(ra, buffersize, objectCachePercent, rowdef, new kelondroNaturalOrder(true), rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
this(ra, buffersize, preloadTime, objectCachePercent, rowdef, new kelondroNaturalOrder(true), rowdef.columns() /* txtProps */, 80 /* txtPropWidth */, exitOnFail);
}
public kelondroTree(kelondroRA ra, long buffersize, int objectCachePercent, kelondroRow rowdef, kelondroOrder objectOrder, int txtProps, int txtPropsWidth, boolean exitOnFail) {
public kelondroTree(kelondroRA ra, long buffersize, long preloadTime, int objectCachePercent, kelondroRow rowdef, kelondroOrder objectOrder, int txtProps, int txtPropsWidth, boolean exitOnFail) {
// this creates a new tree within a kelondroRA
super(ra,
(100 - objectCachePercent) * buffersize / 100,
(100 - objectCachePercent) * buffersize / 100, preloadTime,
thisOHBytes, thisOHHandles, rowdef,
thisFHandles, txtProps, txtPropsWidth, exitOnFail);
try {
@ -144,17 +144,17 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
initObjectCache(buffersize, objectCachePercent);
}
public kelondroTree(File file, long buffersize, int objectCachePercent) throws IOException {
public kelondroTree(File file, long buffersize, long preloadTime, int objectCachePercent) throws IOException {
// this opens a file with an existing tree file
super(file, (100 - objectCachePercent) * buffersize / 100);
super(file, (100 - objectCachePercent) * buffersize / 100, preloadTime);
readOrderType();
super.setLogger(log);
initObjectCache(buffersize, objectCachePercent);
}
public kelondroTree(kelondroRA ra, long buffersize, int objectCachePercent) throws IOException {
public kelondroTree(kelondroRA ra, long buffersize, long preloadTime, int objectCachePercent) throws IOException {
// this opens a file with an existing tree in a kelondroRA
super(ra, (100 - objectCachePercent) * buffersize / 100);
super(ra, (100 - objectCachePercent) * buffersize / 100, preloadTime);
readOrderType();
super.setLogger(log);
initObjectCache(buffersize, objectCachePercent);
@ -1299,7 +1299,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
// test script
File testFile = new File("test.db");
while (testFile.exists()) testFile.delete();
kelondroTree fm = new kelondroTree(testFile, 0x100000, 10, 4, 4, true);
kelondroTree fm = new kelondroTree(testFile, 0x100000, 0, 10, 4, 4, true);
byte[] dummy = "".getBytes();
fm.put("abc0".getBytes(), dummy); fm.put("bcd0".getBytes(), dummy);
fm.put("def0".getBytes(), dummy); fm.put("bab0".getBytes(), dummy);
@ -1321,7 +1321,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
ret = null;
}
} else if (args.length == 2) {
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 0, 10);
if (args[0].equals("-v")) {
fm.print();
ret = null;
@ -1329,11 +1329,11 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
fm.close();
} else if (args.length == 3) {
if (args[0].equals("-d")) {
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 0, 10);
fm.remove(args[2].getBytes());
fm.close();
} else if (args[0].equals("-i")) {
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 0, 10);
int i = fm.imp(new File(args[1]),";");
fm.close();
ret = (i + " records imported").getBytes();
@ -1356,12 +1356,12 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
if (f != null) try {f.close();}catch(Exception e){}
}
} else if (args[0].equals("-g")) {
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 0, 10);
kelondroRow.Entry ret2 = fm.get(args[2].getBytes());
ret = ((ret2 == null) ? null : ret2.getColBytes(1));
fm.close();
} else if (args[0].equals("-n")) {
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[1]), 0x100000, 0, 10);
//byte[][] keys = fm.getSequentialKeys(args[2].getBytes(), 500, true);
Iterator rowIt = fm.rows(true, false, (args[2].length() == 0) ? null : args[2].getBytes());
Vector v = new Vector();
@ -1375,10 +1375,10 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
File f = new File(args[3]);
if (f.exists()) f.delete();
kelondroRow lens = new kelondroRow(new int[]{Integer.parseInt(args[1]), Integer.parseInt(args[2])});
kelondroTree fm = new kelondroTree(f, 0x100000, 10, lens, true);
kelondroTree fm = new kelondroTree(f, 0x100000, 0, 10, lens, true);
fm.close();
} else if (args[0].equals("-u")) {
kelondroTree fm = new kelondroTree(new File(args[3]), 0x100000, 10);
kelondroTree fm = new kelondroTree(new File(args[3]), 0x100000, 0, 10);
ret = fm.put(args[1].getBytes(), args[2].getBytes());
fm.close();
}
@ -1441,7 +1441,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
int steps = 0;
while (true) {
if (testFile.exists()) testFile.delete();
tt = new kelondroTree(testFile, 200, 10, 4 ,4, true);
tt = new kelondroTree(testFile, 200, 0, 10, 4 ,4, true);
steps = 10 + ((int) System.currentTimeMillis() % 7) * (((int) System.currentTimeMillis() + 17) % 11);
t = s;
d = "";
@ -1507,7 +1507,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
File f = new File("test.db");
if (f.exists()) f.delete();
try {
kelondroTree tt = new kelondroTree(f, 1000, 10, 4, 4, true);
kelondroTree tt = new kelondroTree(f, 1000, 0, 10, 4, 4, true);
byte[] b;
b = testWord('B'); tt.put(b, b); //tt.print();
b = testWord('C'); tt.put(b, b); //tt.print();
@ -1541,7 +1541,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
File f = new File("test.db");
if (f.exists()) f.delete();
try {
kelondroTree tt = new kelondroTree(f, 0, 10, 4, 4, true);
kelondroTree tt = new kelondroTree(f, 0, 0, 10, 4, 4, true);
byte[] b;
for (int i = 0; i < 100; i++) {
b = ("T" + i).getBytes(); tt.put(b, b);
@ -1570,7 +1570,7 @@ public class kelondroTree extends kelondroRecords implements kelondroIndex {
public static kelondroTree testTree(File f, String testentities) throws IOException {
if (f.exists()) f.delete();
kelondroTree tt = new kelondroTree(f, 0, 10, 4, 4, true);
kelondroTree tt = new kelondroTree(f, 0, 0, 10, 4, 4, true);
byte[] b;
for (int i = 0; i < testentities.length(); i++) {
b = testWord(testentities.charAt(i));

@ -16,6 +16,7 @@ public abstract class AbstractImporter extends Thread implements dbImporter{
protected plasmaSwitchboard sb;
protected File importPath;
protected int cacheSize;
protected long preloadTime;
protected long globalStart = System.currentTimeMillis();
protected long globalEnd;

@ -22,7 +22,7 @@ public class AssortmentImporter extends AbstractImporter implements dbImporter{
this.jobType = "ASSORTMENT";
}
public void init(File theImportAssortmentFile, int theCacheSize) {
public void init(File theImportAssortmentFile, int theCacheSize, long preloadTime) {
super.init(theImportAssortmentFile);
this.importAssortmentFile = theImportAssortmentFile;
this.cacheSize = theCacheSize;
@ -61,7 +61,7 @@ public class AssortmentImporter extends AbstractImporter implements dbImporter{
// initializing the import assortment db
this.log.logInfo("Initializing source assortment file");
this.assortmentFile = new plasmaWordIndexAssortment(importAssortmentPath,assortmentNr, this.cacheSize/1024, this.log);
this.assortmentFile = new plasmaWordIndexAssortment(importAssortmentPath,assortmentNr, this.cacheSize/1024, preloadTime, this.log);
this.importStartSize = this.assortmentFile.size();
}

@ -24,6 +24,6 @@ public interface dbImporter {
public String getError();
public String getStatus();
public void init(File importPath, int cacheSize);
public void init(File importPath, int cacheSize, long preloadTime);
public void startIt();
}

@ -11,8 +11,7 @@ import de.anomic.plasma.plasmaCrawlProfile;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.plasma.plasmaCrawlNURL.Entry;
public class plasmaCrawlNURLImporter extends AbstractImporter implements
dbImporter {
public class plasmaCrawlNURLImporter extends AbstractImporter implements dbImporter {
private HashSet importProfileHandleCache = new HashSet();
private plasmaCrawlProfile importProfileDB;
@ -47,9 +46,10 @@ public class plasmaCrawlNURLImporter extends AbstractImporter implements
return theStatus.toString();
}
public void init(File theImportPath, int theCacheSize) {
public void init(File theImportPath, int theCacheSize, long preloadTime) {
super.init(theImportPath);
this.cacheSize = theCacheSize;
this.preloadTime = preloadTime;
File noticeUrlDbFile = new File(this.importPath,"urlNotice1.db");
File profileDbFile = new File(this.importPath, "crawlProfiles0.db");
@ -89,13 +89,13 @@ public class plasmaCrawlNURLImporter extends AbstractImporter implements
// init noticeUrlDB
this.log.logInfo("Initializing the source noticeUrlDB");
this.importNurlDB = new plasmaCrawlNURL(this.importPath, ((this.cacheSize*3)/4)/1024);
this.importNurlDB = new plasmaCrawlNURL(this.importPath, ((this.cacheSize*3)/4)/1024, preloadTime);
this.importStartSize = this.importNurlDB.size();
//int stackSize = this.importNurlDB.stackSize();
// init profile DB
this.log.logInfo("Initializing the source profileDB");
this.importProfileDB = new plasmaCrawlProfile(profileDbFile, ((this.cacheSize*1)/4)/1024);
this.importProfileDB = new plasmaCrawlProfile(profileDbFile, ((this.cacheSize*1)/4)/1024, 300);
}
public void run() {

@ -51,7 +51,7 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
return theStatus.toString();
}
public void init(File theImportPath, int theCacheSize) {
public void init(File theImportPath, int theCacheSize, long preloadTime) {
super.init(theImportPath);
this.homeWordIndex = this.sb.wordIndex;
@ -75,9 +75,9 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
}
this.log.logFine("Initializing source word index db.");
this.importWordIndex = new plasmaWordIndex(this.importPath, (this.cacheSize/2)/1024, this.log);
this.importWordIndex = new plasmaWordIndex(this.importPath, (this.cacheSize/2)/1024, preloadTime / 2, this.log);
this.log.logFine("Initializing import URL db.");
this.importUrlDB = new plasmaCrawlLURL(new File(this.importPath, "urlHash.db"), (this.cacheSize/2)/1024);
this.importUrlDB = new plasmaCrawlLURL(new File(this.importPath, "urlHash.db"), (this.cacheSize/2)/1024, preloadTime / 2);
this.importStartSize = this.importWordIndex.size();
}

@ -58,15 +58,15 @@ public class plasmaCrawlBalancer {
private kelondroStack stack;
private HashMap domainStacks;
public plasmaCrawlBalancer(File stackFile, long buffersize) {
public plasmaCrawlBalancer(File stackFile) {
if (stackFile.exists()) {
try {
stack = new kelondroStack(stackFile, buffersize);
stack = new kelondroStack(stackFile);
} catch (IOException e) {
stack = new kelondroStack(stackFile, buffersize, new kelondroRow(new int[] {indexURL.urlHashLength}), true);
stack = new kelondroStack(stackFile, new kelondroRow(new int[] {indexURL.urlHashLength}), true);
}
} else {
stack = new kelondroStack(stackFile, buffersize, new kelondroRow(new int[] {indexURL.urlHashLength}), true);
stack = new kelondroStack(stackFile, new kelondroRow(new int[] {indexURL.urlHashLength}), true);
}
domainStacks = new HashMap();
}

@ -62,7 +62,7 @@ public class plasmaCrawlEURL extends indexURL {
private LinkedList rejectedStack = new LinkedList(); // strings: url
public plasmaCrawlEURL(File cachePath, int bufferkb) {
public plasmaCrawlEURL(File cachePath, int bufferkb, long preloadTime) {
super();
int[] ce = {
urlHashLength, // the url's hash
@ -79,14 +79,14 @@ public class plasmaCrawlEURL extends indexURL {
};
if (cachePath.exists()) try {
// open existing cache
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
cachePath.delete();
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
} else {
// create new cache
cachePath.getParentFile().mkdirs();
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
}
}

@ -94,7 +94,7 @@ public final class plasmaCrawlLURL extends indexURL {
//public static Set damagedURLS = Collections.synchronizedSet(new HashSet());
public plasmaCrawlLURL(File cachePath, int bufferkb) {
public plasmaCrawlLURL(File cachePath, int bufferkb, long preloadTime) {
super();
int[] ce = {
urlHashLength,
@ -116,15 +116,15 @@ public final class plasmaCrawlLURL extends indexURL {
if (cachePath.exists()) {
// open existing cache
try {
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
cachePath.getParentFile().mkdirs();
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
}
} else {
// create new cache
cachePath.getParentFile().mkdirs();
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cachePath, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
}
// init result stacks
@ -974,7 +974,7 @@ public final class plasmaCrawlLURL extends indexURL {
} catch (MalformedURLException e) {}
if (args[0].equals("-l")) try {
// arg 1 is path to URLCache
final plasmaCrawlLURL urls = new plasmaCrawlLURL(new File(args[1]), 1);
final plasmaCrawlLURL urls = new plasmaCrawlLURL(new File(args[1]), 1, 0);
final Iterator enu = urls.entries(true, false);
while (enu.hasNext()) {
((Entry) enu.next()).print();

@ -101,12 +101,14 @@ public class plasmaCrawlNURL extends indexURL {
private final HashSet stackIndex; // to find out if a specific link is already on any stack
private File cacheStacksPath;
private int bufferkb;
private long preloadTime;
initStackIndex initThead;
public plasmaCrawlNURL(File cacheStacksPath, int bufferkb) {
public plasmaCrawlNURL(File cacheStacksPath, int bufferkb, long preloadTime) {
super();
this.cacheStacksPath = cacheStacksPath;
this.bufferkb = bufferkb;
this.preloadTime = preloadTime;
// create a stack for newly entered entries
if (!(cacheStacksPath.exists())) cacheStacksPath.mkdir(); // make the path
@ -120,31 +122,31 @@ public class plasmaCrawlNURL extends indexURL {
File imageStackFile = new File(cacheStacksPath, "urlNoticeImage0.stack");
File movieStackFile = new File(cacheStacksPath, "urlNoticeMovie0.stack");
File musicStackFile = new File(cacheStacksPath, "urlNoticeMusic0.stack");
coreStack = new plasmaCrawlBalancer(coreStackFile, 0);
limitStack = new plasmaCrawlBalancer(limitStackFile, 0);
overhangStack = new plasmaCrawlBalancer(overhangStackFile, 0);
remoteStack = new plasmaCrawlBalancer(remoteStackFile, 0);
coreStack = new plasmaCrawlBalancer(coreStackFile);
limitStack = new plasmaCrawlBalancer(limitStackFile);
overhangStack = new plasmaCrawlBalancer(overhangStackFile);
remoteStack = new plasmaCrawlBalancer(remoteStackFile);
kelondroRow rowdef = new kelondroRow(new int[] {indexURL.urlHashLength});
if (imageStackFile.exists()) try {
imageStack = new kelondroStack(imageStackFile, 0);
imageStack = new kelondroStack(imageStackFile);
} catch (IOException e) {
imageStack = new kelondroStack(imageStackFile, 0, rowdef, true);
imageStack = new kelondroStack(imageStackFile, rowdef, true);
} else {
imageStack = new kelondroStack(imageStackFile, 0, rowdef, true);
imageStack = new kelondroStack(imageStackFile, rowdef, true);
}
if (movieStackFile.exists()) try {
movieStack = new kelondroStack(movieStackFile, 0);
movieStack = new kelondroStack(movieStackFile);
} catch (IOException e) {
movieStack = new kelondroStack(movieStackFile, 0, rowdef, true);
movieStack = new kelondroStack(movieStackFile, rowdef, true);
} else {
movieStack = new kelondroStack(movieStackFile, 0, rowdef, true);
movieStack = new kelondroStack(movieStackFile, rowdef, true);
}
if (musicStackFile.exists()) try {
musicStack = new kelondroStack(musicStackFile, 0);
musicStack = new kelondroStack(musicStackFile);
} catch (IOException e) {
musicStack = new kelondroStack(musicStackFile, 0, rowdef, true);
musicStack = new kelondroStack(musicStackFile, rowdef, true);
} else {
musicStack = new kelondroStack(musicStackFile, 0, rowdef, true);
musicStack = new kelondroStack(musicStackFile, rowdef, true);
}
// init stack Index
@ -166,14 +168,14 @@ public class plasmaCrawlNURL extends indexURL {
File cacheFile = new File(cacheStacksPath, "urlNotice1.db");
if (cacheFile.exists()) try {
// open existing cache
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent);
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
cacheFile.delete();
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
} else {
// create new cache
cacheFile.getParentFile().mkdirs();
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
urlHashCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(ce), true);
}
}

@ -61,18 +61,21 @@ public class plasmaCrawlProfile {
private HashMap domsCache;
private File profileTableFile;
private int bufferkb;
private long preloadTime;
public plasmaCrawlProfile(File file, int bufferkb) {
public plasmaCrawlProfile(File file, int bufferkb, long preloadTime) {
this.profileTableFile = file;
this.bufferkb = bufferkb;
this.preloadTime = preloadTime;
kelondroDyn dyn = null;
if (profileTableFile.exists()) try {
dyn = new kelondroDyn(file, bufferkb * 1024, '#');
dyn = new kelondroDyn(file, bufferkb * 1024, preloadTime, '#');
} catch (IOException e) {
profileTableFile.delete();
dyn = new kelondroDyn(file, bufferkb * 1024, indexURL.urlCrawlProfileHandleLength, 2000, '#', true);
dyn = new kelondroDyn(file, bufferkb * 1024, preloadTime, indexURL.urlCrawlProfileHandleLength, 2000, '#', true);
} else {
profileTableFile.getParentFile().mkdirs();
dyn = new kelondroDyn(file, bufferkb * 1024, indexURL.urlCrawlProfileHandleLength, 2000, '#', true);
dyn = new kelondroDyn(file, bufferkb * 1024, preloadTime, indexURL.urlCrawlProfileHandleLength, 2000, '#', true);
}
profileTable = new kelondroMap(dyn);
domsCache = new HashMap();
@ -95,7 +98,7 @@ public class plasmaCrawlProfile {
if (profileTable != null) try { profileTable.close(); } catch (IOException e) {}
if (!(profileTableFile.delete())) throw new RuntimeException("cannot delete crawl profile database");
profileTableFile.getParentFile().mkdirs();
profileTable = new kelondroMap(new kelondroDyn(profileTableFile, bufferkb * 1024, indexURL.urlCrawlProfileHandleLength, 2000, '#', true));
profileTable = new kelondroMap(new kelondroDyn(profileTableFile, bufferkb * 1024, preloadTime, indexURL.urlCrawlProfileHandleLength, 2000, '#', true));
}
public void close() {

@ -65,23 +65,25 @@ public class plasmaCrawlRobotsTxt {
kelondroMap robotsTable;
private final File robotsTableFile;
private int bufferkb;
private long preloadTime;
public plasmaCrawlRobotsTxt(File robotsTableFile, int bufferkb) {
public plasmaCrawlRobotsTxt(File robotsTableFile, int bufferkb, long preloadTime) {
this.robotsTableFile = robotsTableFile;
this.bufferkb = bufferkb;
this.preloadTime = preloadTime;
if (robotsTableFile.exists()) {
try {
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, '_'));
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, preloadTime, '_'));
} catch (kelondroException e) {
robotsTableFile.delete();
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, 256, 512, '_', true));
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, preloadTime, 256, 512, '_', true));
} catch (IOException e) {
robotsTableFile.delete();
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, 256, 512, '_', true));
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, preloadTime, 256, 512, '_', true));
}
} else {
robotsTableFile.getParentFile().mkdirs();
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, 256, 512, '_', true));
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, bufferkb * 1024, preloadTime, 256, 512, '_', true));
}
}
@ -104,7 +106,7 @@ public class plasmaCrawlRobotsTxt {
} catch (IOException e) {}
if (!(robotsTableFile.delete())) throw new RuntimeException("cannot delete robots.txt database");
robotsTableFile.getParentFile().mkdirs();
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, this.bufferkb, 256, 512, '_', true));
robotsTable = new kelondroMap(new kelondroDyn(robotsTableFile, this.bufferkb, preloadTime, 256, 512, '_', true));
}
public void close() {

@ -79,10 +79,10 @@ public final class plasmaCrawlStacker {
//private boolean stopped = false;
private stackCrawlQueue queue;
public plasmaCrawlStacker(plasmaSwitchboard sb, File dbPath, int dbCacheSize) {
public plasmaCrawlStacker(plasmaSwitchboard sb, File dbPath, int dbCacheSize, long preloadTime) {
this.sb = sb;
this.queue = new stackCrawlQueue(dbPath,dbCacheSize);
this.queue = new stackCrawlQueue(dbPath, dbCacheSize, preloadTime);
this.log.logInfo(this.queue.size() + " entries in the stackCrawl queue.");
this.log.logInfo("STACKCRAWL thread initialized.");
@ -559,7 +559,7 @@ public final class plasmaCrawlStacker {
private final LinkedList urlEntryHashCache;
private kelondroTree urlEntryCache;
public stackCrawlQueue(File cacheStacksPath, int bufferkb) {
public stackCrawlQueue(File cacheStacksPath, int bufferkb, long preloadTime) {
// init the read semaphore
this.readSync = new serverSemaphore (0);
@ -576,10 +576,10 @@ public final class plasmaCrawlStacker {
if (cacheFile.exists()) {
// open existing cache
try {
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent);
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
cacheFile.delete();
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
}
try {
// loop through the list and fill the messageList with url hashs
@ -601,7 +601,7 @@ public final class plasmaCrawlStacker {
// deleting old db and creating a new db
try {this.urlEntryCache.close();}catch(Exception ex){}
cacheFile.delete();
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
} catch (IOException e) {
/* if we have an error, we start with a fresh database */
plasmaCrawlStacker.this.log.logSevere("Unable to initialize crawl stacker queue, IOException:" + e.getMessage() + ". Reseting DB.\n",e);
@ -609,12 +609,12 @@ public final class plasmaCrawlStacker {
// deleting old db and creating a new db
try {this.urlEntryCache.close();}catch(Exception ex){}
cacheFile.delete();
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
}
} else {
// create new cache
cacheFile.getParentFile().mkdirs();
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
this.urlEntryCache = new kelondroTree(cacheFile, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(plasmaCrawlNURL.ce), true);
}
}

@ -97,7 +97,7 @@ public final class plasmaHTCache {
public final serverLog log;
public static final HashSet filesInUse = new HashSet(); // can we delete this file
public plasmaHTCache(File htCachePath, long maxCacheSize, int bufferkb) {
public plasmaHTCache(File htCachePath, long maxCacheSize, int bufferkb, long preloadTime) {
// this.switchboard = switchboard;
this.log = new serverLog("HTCACHE");
@ -156,9 +156,9 @@ public final class plasmaHTCache {
File dbfile = new File(this.cachePath, "responseHeader.db");
try {
if (dbfile.exists())
this.responseHeaderDB = new kelondroMap(new kelondroDyn(dbfile, bufferkb * 0x400, '#'));
this.responseHeaderDB = new kelondroMap(new kelondroDyn(dbfile, bufferkb * 0x400, preloadTime, '#'));
else
this.responseHeaderDB = new kelondroMap(new kelondroDyn(dbfile, bufferkb * 0x400, indexURL.urlHashLength, 150, '#', false));
this.responseHeaderDB = new kelondroMap(new kelondroDyn(dbfile, bufferkb * 0x400, preloadTime, indexURL.urlHashLength, 150, '#', false));
} catch (IOException e) {
this.log.logSevere("the request header database could not be opened: " + e.getMessage());
System.exit(0);

@ -329,33 +329,44 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
}
// read memory amount
int ramLURL = (int) getConfigLong("ramCacheLURL", 1024) / 1024;
int ramNURL = (int) getConfigLong("ramCacheNURL", 1024) / 1024;
int ramEURL = (int) getConfigLong("ramCacheEURL", 1024) / 1024;
int ramRWI = (int) getConfigLong("ramCacheRWI", 1024) / 1024;
int ramHTTP = (int) getConfigLong("ramCacheHTTP", 1024) / 1024;
int ramMessage = (int) getConfigLong("ramCacheMessage", 1024) / 1024;
int ramRobots = (int) getConfigLong("ramCacheRobots",1024) / 1024;
int ramProfiles= (int) getConfigLong("ramCacheProfiles",1024) / 1024;
int ramPreNURL = (int) getConfigLong("ramCachePreNURL", 1024) / 1024;
int ramWiki = (int) getConfigLong("ramCacheWiki", 1024) / 1024;
int ramBlog = (int) getConfigLong("ramCacheBlog", 1024) / 1024;
this.log.logConfig("LURL Cache memory = " + ppRamString(ramLURL));
this.log.logConfig("NURL Cache memory = " + ppRamString(ramNURL));
this.log.logConfig("EURL Cache memory = " + ppRamString(ramEURL));
this.log.logConfig("RWI Cache memory = " + ppRamString(ramRWI));
this.log.logConfig("HTTP Cache memory = " + ppRamString(ramHTTP));
this.log.logConfig("Message Cache memory = " + ppRamString(ramMessage));
this.log.logConfig("Wiki Cache memory = " + ppRamString(ramWiki));
this.log.logConfig("Blog Cache memory = " + ppRamString(ramBlog));
this.log.logConfig("Robots Cache memory = " + ppRamString(ramRobots));
this.log.logConfig("Profiles Cache memory = " + ppRamString(ramProfiles));
this.log.logConfig("PreNURL Cache memory = " + ppRamString(ramPreNURL));
int ramLURL = (int) getConfigLong("ramCacheLURL", 1024) / 1024;
long ramLURL_time = getConfigLong("ramCacheLURL_time", 1000);
int ramNURL = (int) getConfigLong("ramCacheNURL", 1024) / 1024;
long ramNURL_time = getConfigLong("ramCacheNURL_time", 1000);
int ramEURL = (int) getConfigLong("ramCacheEURL", 1024) / 1024;
long ramEURL_time = getConfigLong("ramCacheEURL_time", 1000);
int ramRWI = (int) getConfigLong("ramCacheRWI", 1024) / 1024;
long ramRWI_time = getConfigLong("ramCacheRWI_time", 1000);
int ramHTTP = (int) getConfigLong("ramCacheHTTP", 1024) / 1024;
long ramHTTP_time = getConfigLong("ramCacheHTTP_time", 1000);
int ramMessage = (int) getConfigLong("ramCacheMessage", 1024) / 1024;
long ramMessage_time = getConfigLong("ramCacheMessage_time", 1000);
int ramRobots = (int) getConfigLong("ramCacheRobots",1024) / 1024;
long ramRobots_time = getConfigLong("ramCacheRobots_time",1000);
int ramProfiles = (int) getConfigLong("ramCacheProfiles",1024) / 1024;
long ramProfiles_time= getConfigLong("ramCacheProfiles_time", 1000);
int ramPreNURL = (int) getConfigLong("ramCachePreNURL", 1024) / 1024;
long ramPreNURL_time = getConfigLong("ramCachePreNURL_time", 1000);
int ramWiki = (int) getConfigLong("ramCacheWiki", 1024) / 1024;
long ramWiki_time = getConfigLong("ramCacheWiki_time", 1000);
int ramBlog = (int) getConfigLong("ramCacheBlog", 1024) / 1024;
long ramBlog_time = getConfigLong("ramCacheBlog_time", 1000);
this.log.logConfig("LURL Cache memory = " + ppRamString(ramLURL) + ", preloadTime = " + ramLURL_time);
this.log.logConfig("NURL Cache memory = " + ppRamString(ramNURL) + ", preloadTime = " + ramNURL_time);
this.log.logConfig("EURL Cache memory = " + ppRamString(ramEURL) + ", preloadTime = " + ramEURL_time);
this.log.logConfig("RWI Cache memory = " + ppRamString(ramRWI) + ", preloadTime = " + ramRWI_time);
this.log.logConfig("HTTP Cache memory = " + ppRamString(ramHTTP) + ", preloadTime = " + ramHTTP_time);
this.log.logConfig("Message Cache memory = " + ppRamString(ramMessage) + ", preloadTime = " + ramMessage_time);
this.log.logConfig("Wiki Cache memory = " + ppRamString(ramWiki) + ", preloadTime = " + ramWiki_time);
this.log.logConfig("Blog Cache memory = " + ppRamString(ramBlog) + ", preloadTime = " + ramBlog_time);
this.log.logConfig("Robots Cache memory = " + ppRamString(ramRobots) + ", preloadTime = " + ramRobots_time);
this.log.logConfig("Profiles Cache memory = " + ppRamString(ramProfiles) + ", preloadTime = " + ramProfiles_time);
this.log.logConfig("PreNURL Cache memory = " + ppRamString(ramPreNURL) + ", preloadTime = " + ramPreNURL_time);
// make crawl profiles database and default profiles
this.log.logConfig("Initializing Crawl Profiles");
File profilesFile = new File(this.plasmaPath, "crawlProfiles0.db");
this.profiles = new plasmaCrawlProfile(profilesFile, ramProfiles);
this.profiles = new plasmaCrawlProfile(profilesFile, ramProfiles, ramProfiles_time);
initProfiles();
log.logConfig("Loaded profiles from file " + profilesFile.getName() +
", " + this.profiles.size() + " entries" +
@ -364,16 +375,16 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
// loading the robots.txt db
this.log.logConfig("Initializing robots.txt DB");
File robotsDBFile = new File(this.plasmaPath, "crawlRobotsTxt.db");
robots = new plasmaCrawlRobotsTxt(robotsDBFile, ramRobots);
robots = new plasmaCrawlRobotsTxt(robotsDBFile, ramRobots, ramRobots_time);
this.log.logConfig("Loaded robots.txt DB from file " + robotsDBFile.getName() +
", " + robots.size() + " entries" +
", " + ppRamString(robotsDBFile.length()/1024));
// start indexing management
log.logConfig("Starting Indexing Management");
urlPool = new plasmaURLPool(plasmaPath, ramLURL, ramNURL, ramEURL);
urlPool = new plasmaURLPool(plasmaPath, ramLURL, ramNURL, ramEURL, ramLURL_time);
wordIndex = new plasmaWordIndex(plasmaPath, ramRWI, log);
wordIndex = new plasmaWordIndex(plasmaPath, ramRWI, ramRWI_time, log);
int wordCacheMaxCount = (int) getConfigLong("wordCacheMaxCount", 10000);
wordIndex.setMaxWordCount(wordCacheMaxCount);
@ -391,7 +402,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
}
this.log.logInfo("HTCACHE Path = " + htCachePath.getAbsolutePath());
long maxCacheSize = 1024 * 1024 * Long.parseLong(getConfig("proxyCacheSize", "2")); // this is megabyte
this.cacheManager = new plasmaHTCache(htCachePath, maxCacheSize, ramHTTP);
this.cacheManager = new plasmaHTCache(htCachePath, maxCacheSize, ramHTTP, ramHTTP_time);
// make parser
log.logConfig("Starting Parser");
@ -460,18 +471,18 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
Boolean.valueOf(getConfig(CRAWLJOB_GLOBAL_CRAWL_TRIGGER + "_isPaused", "false"))});
// starting board
initMessages(ramMessage);
initMessages(ramMessage, ramMessage_time);
// starting wiki
initWiki(ramWiki);
initWiki(ramWiki, ramWiki_time);
//starting blog
initBlog(ramBlog);
initBlog(ramBlog, ramBlog_time);
// Init User DB
this.log.logConfig("Loading User DB");
File userDbFile = new File(getRootPath(), "DATA/SETTINGS/user.db");
this.userDB = new userDB(userDbFile, 512);
this.userDB = new userDB(userDbFile, 512, 500);
this.log.logConfig("Loaded User DB from file " + userDbFile.getName() +
", " + this.userDB.size() + " entries" +
", " + ppRamString(userDbFile.length()/1024));
@ -547,7 +558,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
serverInstantThread.oneTimeJob(yc, "loadSeeds", yacyCore.log, 3000);
// initializing the stackCrawlThread
this.sbStackCrawlThread = new plasmaCrawlStacker(this,this.plasmaPath,ramPreNURL);
this.sbStackCrawlThread = new plasmaCrawlStacker(this, this.plasmaPath, ramPreNURL, ramPreNURL_time);
//this.sbStackCrawlThread = new plasmaStackCrawlThread(this,this.plasmaPath,ramPreNURL);
//this.sbStackCrawlThread.start();
@ -613,29 +624,28 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
}
public void initMessages(int ramMessage) {
public void initMessages(int ramMessage, long ramMessage_time) {
this.log.logConfig("Starting Message Board");
File messageDbFile = new File(workPath, "message.db");
this.messageDB = new messageBoard(messageDbFile, ramMessage);
this.messageDB = new messageBoard(messageDbFile, ramMessage, ramMessage_time);
this.log.logConfig("Loaded Message Board DB from file " + messageDbFile.getName() +
", " + this.messageDB.size() + " entries" +
", " + ppRamString(messageDbFile.length()/1024));
}
public void initWiki(int ramWiki) {
public void initWiki(int ramWiki, long ramWiki_time) {
this.log.logConfig("Starting Wiki Board");
File wikiDbFile = new File(workPath, "wiki.db");
this.wikiDB = new wikiBoard(wikiDbFile,
new File(workPath, "wiki-bkp.db"), ramWiki);
this.wikiDB = new wikiBoard(wikiDbFile, new File(workPath, "wiki-bkp.db"), ramWiki, ramWiki_time);
this.log.logConfig("Loaded Wiki Board DB from file " + wikiDbFile.getName() +
", " + this.wikiDB.size() + " entries" +
", " + ppRamString(wikiDbFile.length()/1024));
}
public void initBlog(int ramBlog) {
public void initBlog(int ramBlog, long ramBlog_time) {
this.log.logConfig("Starting Blog");
File blogDbFile = new File(workPath, "blog.db");
this.blogDB = new blogBoard(blogDbFile, ramBlog);
this.blogDB = new blogBoard(blogDbFile, ramBlog, ramBlog_time);
this.log.logConfig("Loaded Blog DB from file " + blogDbFile.getName() +
", " + this.blogDB.size() + " entries" +
", " + ppRamString(blogDbFile.length()/1024));
@ -645,7 +655,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
File bookmarksFile = new File(workPath, "bookmarks.db");
File tagsFile = new File(workPath, "bookmarkTags.db");
File datesFile = new File(workPath, "bookmarkDates.db");
this.bookmarksDB = new bookmarksDB(bookmarksFile, tagsFile, datesFile, 512);
this.bookmarksDB = new bookmarksDB(bookmarksFile, tagsFile, datesFile, 512, 500);
this.log.logConfig("Loaded Bookmarks DB from files "+ bookmarksFile.getName()+ ", "+tagsFile.getName());
this.log.logConfig(this.bookmarksDB.tagsSize()+" Tag, "+this.bookmarksDB.bookmarksSize()+" Bookmarks");
}
@ -709,7 +719,8 @@ public final class plasmaSwitchboard extends serverAbstractSwitch implements ser
final File pdb = new File(plasmaPath, "crawlProfiles0.db");
if (pdb.exists()) pdb.delete();
int ramProfiles = (int) getConfigLong("ramCacheProfiles", 1024) / 1024;
profiles = new plasmaCrawlProfile(pdb, ramProfiles);
long ramProfiles_time = getConfigLong("ramCacheProfiles_time", 1000);
profiles = new plasmaCrawlProfile(pdb, ramProfiles, ramProfiles_time);
initProfiles();
}

@ -91,12 +91,12 @@ public class plasmaSwitchboardQueue {
indexURL.urlDescrLength
});
if (sbQueueStackPath.exists()) try {
sbQueueStack = new kelondroStack(sbQueueStackPath, 0);
sbQueueStack = new kelondroStack(sbQueueStackPath);
} catch (IOException e) {
sbQueueStackPath.delete();
sbQueueStack = new kelondroStack(sbQueueStackPath, 0, rowdef, true);
sbQueueStack = new kelondroStack(sbQueueStackPath, rowdef, true);
} else {
sbQueueStack = new kelondroStack(sbQueueStackPath, 0, rowdef, true);
sbQueueStack = new kelondroStack(sbQueueStackPath, rowdef, true);
}
}

@ -57,10 +57,10 @@ public class plasmaURLPool {
public final plasmaCrawlNURL noticeURL;
public final plasmaCrawlEURL errorURL;
public plasmaURLPool(File plasmaPath, int ramLURL, int ramNURL, int ramEURL) {
loadedURL = new plasmaCrawlLURL(new File(plasmaPath, "urlHash.db"), ramLURL);
noticeURL = new plasmaCrawlNURL(plasmaPath, ramNURL);
errorURL = new plasmaCrawlEURL(new File(plasmaPath, "urlErr0.db"), ramEURL);
public plasmaURLPool(File plasmaPath, int ramLURL, int ramNURL, int ramEURL, long preloadTime) {
loadedURL = new plasmaCrawlLURL(new File(plasmaPath, "urlHash.db"), ramLURL, preloadTime);
noticeURL = new plasmaCrawlNURL(plasmaPath, ramNURL, 0);
errorURL = new plasmaCrawlEURL(new File(plasmaPath, "urlErr0.db"), ramEURL, 0);
}
public String exists(String hash) {

@ -58,13 +58,13 @@ public class plasmaWordConnotation {
private static final int nodesize = 4048;
private kelondroDynTree refDB;
public plasmaWordConnotation(File refDBfile, int bufferkb, char fillChar) {
public plasmaWordConnotation(File refDBfile, int bufferkb, long preloadTime, char fillChar) {
if (refDBfile.exists()) try {
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, fillChar);
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, preloadTime, fillChar);
} catch (IOException e) {
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, wordlength, nodesize, new kelondroRow(new int[] {wordlength, countlength}), fillChar, true);
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, preloadTime, wordlength, nodesize, new kelondroRow(new int[] {wordlength, countlength}), fillChar, true);
} else {
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, wordlength, nodesize, new kelondroRow(new int[] {wordlength, countlength}), fillChar, true);
refDB = new kelondroDynTree(refDBfile, bufferkb * 0x400, preloadTime, wordlength, nodesize, new kelondroRow(new int[] {wordlength, countlength}), fillChar, true);
}
}

@ -85,7 +85,7 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
private final plasmaWordIndexFileCluster backend;
private final kelondroOrder indexOrder = new kelondroNaturalOrder(true);
public plasmaWordIndex(File databaseRoot, int bufferkb, serverLog log) {
public plasmaWordIndex(File databaseRoot, int bufferkb, long preloadTime, serverLog log) {
this.databaseRoot = databaseRoot;
this.backend = new plasmaWordIndexFileCluster(databaseRoot, log);
this.ramCache = new indexRAMCacheRI(databaseRoot, log);
@ -94,7 +94,7 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
File assortmentClusterPath = new File(databaseRoot, indexAssortmentClusterPath);
if (!(assortmentClusterPath.exists())) assortmentClusterPath.mkdirs();
this.assortmentBufferSize = bufferkb;
this.assortmentCluster = new plasmaWordIndexAssortmentCluster(assortmentClusterPath, assortmentCount, assortmentBufferSize, log);
this.assortmentCluster = new plasmaWordIndexAssortmentCluster(assortmentClusterPath, assortmentCount, assortmentBufferSize, preloadTime, log);
}
public File getRoot() {
@ -673,7 +673,7 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
// System.out.println(kelondroMSetTools.fastStringComparator(true).compare("RwGeoUdyDQ0Y", "rwGeoUdyDQ0Y"));
// System.out.println(new Date(reverseMicroDateDays(microDateDays(System.currentTimeMillis()))));
plasmaWordIndex index = new plasmaWordIndex(new File("D:\\dev\\proxy\\DATA\\PLASMADB"), 555, new serverLog("TESTAPP"));
plasmaWordIndex index = new plasmaWordIndex(new File("D:\\dev\\proxy\\DATA\\PLASMADB"), 555, 1000, new serverLog("TESTAPP"));
try {
Iterator iter = index.wordHashes("5A8yhZMh_Kmv", plasmaWordIndex.RL_WORDFILES, true);
while (iter.hasNext()) {

@ -83,6 +83,7 @@ public final class plasmaWordIndexAssortment {
private serverLog log;
private kelondroTree assortments;
private long bufferSize;
private long preloadTime;
private static String intx(int x) {
String s = Integer.toString(x);
@ -102,17 +103,18 @@ public final class plasmaWordIndexAssortment {
return structure;
}
public plasmaWordIndexAssortment(File storagePath, int assortmentLength, int bufferkb, serverLog log) {
public plasmaWordIndexAssortment(File storagePath, int assortmentLength, int bufferkb, long preloadTime, serverLog log) {
if (!(storagePath.exists())) storagePath.mkdirs();
this.assortmentFile = new File(storagePath, assortmentFileName + intx(assortmentLength) + ".db");
this.assortmentLength = assortmentLength;
//this.bufferStructureLength = 3 + 2 * assortmentLength;
this.bufferSize = bufferkb * 1024;
this.preloadTime = preloadTime;
this.log = log;
if (assortmentFile.exists()) {
// open existing assortment tree file
try {
assortments = new kelondroTree(assortmentFile, bufferSize, kelondroTree.defaultObjectCachePercent);
assortments = new kelondroTree(assortmentFile, bufferSize, preloadTime, kelondroTree.defaultObjectCachePercent);
if (log != null) log.logConfig("Opened Assortment Database, " + assortments.size() + " entries, width " + assortmentLength + ", " + bufferkb + "kb buffer");
return;
} catch (IOException e){
@ -123,7 +125,7 @@ public final class plasmaWordIndexAssortment {
assortmentFile.delete(); // make space for new one
}
// create new assortment tree file
assortments = new kelondroTree(assortmentFile, bufferSize, kelondroTree.defaultObjectCachePercent, new kelondroRow(bufferStructure(assortmentLength)), true);
assortments = new kelondroTree(assortmentFile, bufferSize, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(bufferStructure(assortmentLength)), true);
if (log != null) log.logConfig("Created new Assortment Database, width " + assortmentLength + ", " + bufferkb + "kb buffer");
}
@ -243,7 +245,7 @@ public final class plasmaWordIndexAssortment {
if (!(assortmentFile.delete())) throw new RuntimeException("cannot delete assortment database");
}
if (assortmentFile.exists()) assortmentFile.delete();
assortments = new kelondroTree(assortmentFile, bufferSize, kelondroTree.defaultObjectCachePercent, new kelondroRow(bufferStructure(assortmentLength)), true);
assortments = new kelondroTree(assortmentFile, bufferSize, preloadTime, kelondroTree.defaultObjectCachePercent, new kelondroRow(bufferStructure(assortmentLength)), true);
}
public Iterator hashes(String startWordHash, boolean up, boolean rot) throws IOException {

@ -72,7 +72,7 @@ public final class plasmaWordIndexAssortmentCluster extends indexAbstractRI impl
private plasmaWordIndexAssortment[] assortments;
private long completeBufferKB;
public plasmaWordIndexAssortmentCluster(File assortmentsPath, int clusterCount, int bufferkb, serverLog log) {
public plasmaWordIndexAssortmentCluster(File assortmentsPath, int clusterCount, int bufferkb, long preloadTime, serverLog log) {
// set class variables
if (!(assortmentsPath.exists())) assortmentsPath.mkdirs();
this.clusterCount = clusterCount;
@ -86,7 +86,7 @@ public final class plasmaWordIndexAssortmentCluster extends indexAbstractRI impl
int sumSizes = 1;
plasmaWordIndexAssortment testAssortment;
for (int i = 0; i < clusterCount; i++) {
testAssortment = new plasmaWordIndexAssortment(assortmentsPath, i + 1, 0, null);
testAssortment = new plasmaWordIndexAssortment(assortmentsPath, i + 1, 0, 0, null);
sizes[i] = testAssortment.size() + clusterCount - i;
sumSizes += sizes[i];
testAssortment.close();
@ -96,7 +96,11 @@ public final class plasmaWordIndexAssortmentCluster extends indexAbstractRI impl
// initialize cluster using the cluster elements size for optimal buffer
// size
for (int i = 0; i < clusterCount; i++) {
assortments[i] = new plasmaWordIndexAssortment(assortmentsPath, i + 1, (int) (completeBufferKB * (long) sizes[i] / (long) sumSizes), log);
assortments[i] = new plasmaWordIndexAssortment(
assortmentsPath, i + 1,
(int) (completeBufferKB * (long) sizes[i] / (long) sumSizes),
preloadTime * (long) sizes[i] / (long) sumSizes,
log);
}
}

@ -92,13 +92,13 @@ public final class plasmaWordIndexFile {
if (cacheSize > 1048576) cacheSize = 1048576;
if (theLocation.exists()) try {
// open existing index file
kt = new kelondroTree(theLocation, cacheSize, kelondroTree.defaultObjectCachePercent);
kt = new kelondroTree(theLocation, cacheSize, 1000, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
theLocation.delete();
kt = new kelondroTree(theLocation, cacheSize, kelondroTree.defaultObjectCachePercent, indexURL.urlHashLength, indexURLEntry.encodedStringFormLength(), false);
kt = new kelondroTree(theLocation, cacheSize, 1000, kelondroTree.defaultObjectCachePercent, indexURL.urlHashLength, indexURLEntry.encodedStringFormLength(), false);
} else {
// create new index file
kt = new kelondroTree(theLocation, cacheSize, kelondroTree.defaultObjectCachePercent, indexURL.urlHashLength, indexURLEntry.encodedStringFormLength(), false);
kt = new kelondroTree(theLocation, cacheSize, 1000, kelondroTree.defaultObjectCachePercent, indexURL.urlHashLength, indexURLEntry.encodedStringFormLength(), false);
}
return kt; // everyone who get this should close it when finished!
}

@ -173,18 +173,20 @@ public class yacyCore {
// create or init seed cache
int memDHT = Integer.parseInt(switchboard.getConfig("ramCacheDHT", "1024")) / 1024;
long memDHT_time = Long.parseLong(switchboard.getConfig("ramCacheDHT_time", "1000"));
log.logConfig("DHT Cache memory = " + memDHT + " KB");
seedDB = new yacySeedDB(
sb,
new File(yacyDBPath, "seed.new.db"),
new File(yacyDBPath, "seed.old.db"),
new File(yacyDBPath, "seed.pot.db"),
memDHT);
memDHT, memDHT_time);
// create or init news database
int memNews = Integer.parseInt(switchboard.getConfig("ramCacheNews", "1024")) / 1024;
long memNews_time = Long.parseLong(switchboard.getConfig("ramCacheNews_time", "1000"));
log.logConfig("News Cache memory = " + memNews + " KB");
newsPool = new yacyNewsPool(yacyDBPath, memNews);
newsPool = new yacyNewsPool(yacyDBPath, memNews, memNews_time);
loadSeedUploadMethods();

@ -61,6 +61,7 @@ public class yacyNewsDB {
private File path;
private int bufferkb;
private long preloadTime;
private kelondroTree news;
public static final int attributesMaxLength = yacyNewsRecord.maxNewsRecordLength
@ -69,16 +70,17 @@ public class yacyNewsDB {
- yacyCore.universalDateShortPattern.length()
- 2;
public yacyNewsDB(File path, int bufferkb) {
public yacyNewsDB(File path, int bufferkb, long preloadTime) {
this.path = path;
this.bufferkb = bufferkb;
this.preloadTime = preloadTime;
if (path.exists()) try {
news = new kelondroTree(path, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent);
news = new kelondroTree(path, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent);
} catch (IOException e) {
news = createDB(path, bufferkb);
news = createDB(path, bufferkb, preloadTime);
} else {
news = createDB(path, bufferkb);
news = createDB(path, bufferkb, preloadTime);
}
}
@ -90,14 +92,14 @@ public class yacyNewsDB {
new kelondroColumn("", kelondroColumn.celltype_string, attributesMaxLength, kelondroColumn.encoder_string, attributesMaxLength, ""),
});
private static kelondroTree createDB(File path, int bufferkb) {
return new kelondroTree(path, bufferkb * 0x400, kelondroTree.defaultObjectCachePercent, rowdef, true);
private static kelondroTree createDB(File path, int bufferkb, long preloadTime) {
return new kelondroTree(path, bufferkb * 0x400, preloadTime, kelondroTree.defaultObjectCachePercent, rowdef, true);
}
private void resetDB() {
try {close();} catch (Exception e) {}
if (path.exists()) path.delete();
news = createDB(path, bufferkb);
news = createDB(path, bufferkb, preloadTime);
}
public int dbCacheNodeChunkSize() {

@ -93,8 +93,8 @@ public class yacyNewsPool {
private int maxDistribution;
public yacyNewsPool(File yacyDBPath, int bufferkb) {
newsDB = new yacyNewsDB(new File(yacyDBPath, "news1.db"), bufferkb);
public yacyNewsPool(File yacyDBPath, int bufferkb, long preloadTime) {
newsDB = new yacyNewsDB(new File(yacyDBPath, "news1.db"), bufferkb, preloadTime);
outgoingNews = new yacyNewsQueue(new File(yacyDBPath, "newsOut1.stack"), newsDB);
publishedNews = new yacyNewsQueue(new File(yacyDBPath, "newsPublished1.stack"), newsDB);
incomingNews = new yacyNewsQueue(new File(yacyDBPath, "newsIn1.stack"), newsDB);

@ -64,7 +64,7 @@ public class yacyNewsQueue {
this.newsDB = newsDB;
if (path.exists()) try {
queueStack = new kelondroStack(path, 0);
queueStack = new kelondroStack(path);
} catch (kelondroException e) {
path.delete();
queueStack = createStack(path);
@ -82,7 +82,7 @@ public class yacyNewsQueue {
});
private static kelondroStack createStack(File path) {
return new kelondroStack(path, 0, rowdef, true);
return new kelondroStack(path, rowdef, true);
}
private void resetDB() {

@ -93,6 +93,7 @@ public final class yacySeedDB {
private kelondroMap seedActiveDB, seedPassiveDB, seedPotentialDB;
private int seedDBBufferKB;
private long preloadTime;
public final plasmaSwitchboard sb;
public yacySeed mySeed; // my own seed
@ -104,7 +105,7 @@ public final class yacySeedDB {
File seedActiveDBFile,
File seedPassiveDBFile,
File seedPotentialDBFile,
int bufferkb) {
int bufferkb, long preloadTime) {
this.seedDBBufferKB = bufferkb;
this.seedActiveDBFile = seedActiveDBFile;
@ -112,6 +113,7 @@ public final class yacySeedDB {
this.seedPotentialDBFile = seedPotentialDBFile;
this.mySeed = null; // my own seed
this.sb = sb;
this.preloadTime = preloadTime;
// set up seed database
seedActiveDB = openSeedTable(seedActiveDBFile);
@ -195,7 +197,7 @@ public final class yacySeedDB {
private synchronized kelondroMap openSeedTable(File seedDBFile) {
if (seedDBFile.exists()) try {
// open existing seed database
return new kelondroMap(new kelondroDyn(seedDBFile, (seedDBBufferKB * 0x400) / 3, '#'), sortFields, accFields);
return new kelondroMap(new kelondroDyn(seedDBFile, (seedDBBufferKB * 0x400) / 3, preloadTime / 3, '#'), sortFields, accFields);
} catch (kelondroException e) {
// if we have an error, we start with a fresh database
if (seedDBFile.exists()) seedDBFile.delete();
@ -205,7 +207,7 @@ public final class yacySeedDB {
}
// create new seed database
new File(seedDBFile.getParent()).mkdir();
return new kelondroMap(new kelondroDyn(seedDBFile, (seedDBBufferKB * 0x400) / 3, commonHashLength, 480, '#', true), sortFields, accFields);
return new kelondroMap(new kelondroDyn(seedDBFile, (seedDBBufferKB * 0x400) / 3, preloadTime / 3, commonHashLength, 480, '#', true), sortFields, accFields);
}
private synchronized kelondroMap resetSeedTable(kelondroMap seedDB, File seedDBFile) {

@ -137,7 +137,7 @@ public class migration {
file.delete();
} catch (IOException e) {}
}
sb.initWiki((int) sb.getConfigLong("ramCacheWiki", 1024) / 1024);
sb.initWiki((int) sb.getConfigLong("ramCacheWiki", 1024) / 1024, sb.getConfigLong("ramCacheWiki_time", 1000));
}
@ -150,7 +150,7 @@ public class migration {
serverFileUtils.copy(file, file2);
file.delete();
} catch (IOException e) {}
sb.initMessages((int) sb.getConfigLong("ramCacheMessage", 1024) / 1024);
sb.initMessages((int) sb.getConfigLong("ramCacheMessage", 1024) / 1024, sb.getConfigLong("ramCacheMessage_time", 1000));
}
}

@ -663,7 +663,7 @@ public final class yacy {
File dbroot = new File(new File(homePath), "DATA/PLASMADB");
serverLog log = new serverLog("WORDMIGRATION");
log.logInfo("STARTING MIGRATION");
plasmaWordIndex wordIndexCache = new plasmaWordIndex(dbroot, 20000, log);
plasmaWordIndex wordIndexCache = new plasmaWordIndex(dbroot, 20000, 10000, log);
enumerateFiles words = new enumerateFiles(new File(dbroot, "WORDS"), true, false, true, true);
String wordhash;
File wordfile;
@ -708,16 +708,16 @@ public final class yacy {
// db containing all currently loades urls
int cache = dbcache * 1024; // in KB
log.logFine("URLDB-Caches: "+cache+" bytes");
plasmaCrawlLURL currentUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.db"), cache);
plasmaCrawlLURL currentUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.db"), cache, 10000);
// db used to hold all neede urls
plasmaCrawlLURL minimizedUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.temp.db"), cache);
plasmaCrawlLURL minimizedUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.temp.db"), cache, 10000);
Runtime rt = Runtime.getRuntime();
int cacheMem = (int)((rt.maxMemory()-rt.totalMemory())/1024)-(2*cache + 8*1024);
if (cacheMem < 2048) throw new OutOfMemoryError("Not enough memory available to start clean up.");
plasmaWordIndex wordIndex = new plasmaWordIndex(dbroot, cacheMem, log);
plasmaWordIndex wordIndex = new plasmaWordIndex(dbroot, cacheMem, 10000, log);
Iterator wordHashIterator = wordIndex.wordHashes("------------", plasmaWordIndex.RL_WORDFILES, false);
String wordhash;
@ -950,7 +950,7 @@ public final class yacy {
File root = new File(homePath);
try {
plasmaURLPool pool = new plasmaURLPool(new File(root, "DATA/PLASMADB"), 16000, 1000, 1000);
plasmaURLPool pool = new plasmaURLPool(new File(root, "DATA/PLASMADB"), 16000, 1000, 1000, 10000);
Iterator eiter = pool.loadedURL.entries(true, false);
HashSet doms = new HashSet();
plasmaCrawlLURL.Entry entry;
@ -1016,7 +1016,7 @@ public final class yacy {
private static void urllist(String homePath, boolean html, String targetName) {
File root = new File(homePath);
try {
plasmaURLPool pool = new plasmaURLPool(new File(root, "DATA/PLASMADB"), 16000, 1000, 1000);
plasmaURLPool pool = new plasmaURLPool(new File(root, "DATA/PLASMADB"), 16000, 1000, 1000, 10000);
Iterator eiter = pool.loadedURL.entries(true, false);
plasmaCrawlLURL.Entry entry;
File file = new File(root, targetName);
@ -1058,7 +1058,7 @@ public final class yacy {
File dbroot = new File(root, "DATA/PLASMADB");
serverLog log = new serverLog("URLDBCLEANUP");
try {
plasmaCrawlLURL currentUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.db"), 4194304);
plasmaCrawlLURL currentUrlDB = new plasmaCrawlLURL(new File(dbroot, "urlHash.db"), 4194304, 10000);
currentUrlDB.urldbcleanup();
currentUrlDB.close();
} catch (IOException e) {
@ -1077,14 +1077,14 @@ public final class yacy {
try {
Iterator WordHashIterator = null;
if (resource.equals("all")) {
WordIndex = new plasmaWordIndex(homeDBroot, 8*1024*1024, log);
WordIndex = new plasmaWordIndex(homeDBroot, 8*1024*1024, 3000, log);
WordHashIterator = WordIndex.wordHashes(wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false);
} else if (resource.equals("assortments")) {
plasmaWordIndexAssortmentCluster assortmentCluster = new plasmaWordIndexAssortmentCluster(new File(homeDBroot, "ACLUSTER"), 64, 16*1024*1024, log);
plasmaWordIndexAssortmentCluster assortmentCluster = new plasmaWordIndexAssortmentCluster(new File(homeDBroot, "ACLUSTER"), 64, 16*1024*1024, 3000, log);
WordHashIterator = assortmentCluster.wordHashes(wordChunkStartHash, true, false);
} else if (resource.startsWith("assortment")) {
int a = Integer.parseInt(resource.substring(10));
plasmaWordIndexAssortment assortment = new plasmaWordIndexAssortment(new File(homeDBroot, "ACLUSTER"), a, 8*1024*1024, null);
plasmaWordIndexAssortment assortment = new plasmaWordIndexAssortment(new File(homeDBroot, "ACLUSTER"), a, 8*1024*1024, 3000, null);
WordHashIterator = assortment.hashes(wordChunkStartHash, true, false);
} else if (resource.equals("words")) {
plasmaWordIndexFileCluster fileDB = new plasmaWordIndexFileCluster(homeDBroot, log);
@ -1147,7 +1147,7 @@ public final class yacy {
String[] dbFileNames = {"seed.new.db","seed.old.db","seed.pot.db"};
for (int i=0; i < dbFileNames.length; i++) {
File dbFile = new File(yacyDBPath,dbFileNames[i]);
kelondroMap db = new kelondroMap(new kelondroDyn(dbFile, (1024 * 0x400) / 3, '#'), yacySeedDB.sortFields, yacySeedDB.accFields);
kelondroMap db = new kelondroMap(new kelondroDyn(dbFile, (1024 * 0x400) / 3, 3000, '#'), yacySeedDB.sortFields, yacySeedDB.accFields);
kelondroMap.mapIterator it;
it = db.maps(true, false);

@ -496,43 +496,56 @@ xpstopw=true
# ram cache for database files
# ram cache for assortment cache cluster (for all 64 files)
ramCacheRWI = 8388608
ramCacheRWI = 8388608
ramCacheRWI_time = 30000
# ram cache for responseHeader.db
ramCacheHTTP = 1048576
ramCacheHTTP = 1048576
ramCacheHTTP_time = 1000
# ram cache for urlHash.db
ramCacheLURL = 8388608
ramCacheLURL = 8388608
ramCacheLURL_time = 10000
# ram cache for urlNotice.db
ramCacheNURL = 4194304
ramCacheNURL = 4194304
ramCacheNURL_time = 2000
# ram cache for urlErr.db
ramCacheEURL = 8192
ramCacheEURL = 8192
ramCacheEURL_time = 1000
# ram cache for seedDBs
ramCacheDHT = 131072
ramCacheDHT = 131072
ramCacheDHT_time = 1000
# ram cache for message.db
ramCacheMessage = 8192
ramCacheMessage = 8192
ramCacheMessage_time = 500
# ram cache for wiki.db
ramCacheWiki = 8192
ramCacheWiki = 8192
ramCacheWiki_time = 500
# ram cache for blog.db
ramCacheBlog = 2048
ramCacheBlog = 2048
ramCacheBlog_time = 500
# ram cache for news1.db
ramCacheNews = 1048576
ramCacheNews = 1048576
ramCacheNews_time = 1000
# ram cache for robotsTxt.db
ramCacheRobots = 4194304
ramCacheRobots = 4194304
ramCacheRobots_time = 3000
# ram cache for crawlProfile.db
ramCacheProfiles = 8192
ramCacheProfiles = 8192
ramCacheProfiles_time= 500
# ram cache for stack crawl thread db
ramCachePreNURL = 4194304
ramCachePreNURL = 4194304
ramCachePreNURL_time = 3000
# default memory settings for startup of yacy
# is only valid in unix/shell environments and

Loading…
Cancel
Save