added configuration to enable ram table copy

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@6304 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 16 years ago
parent 8274251467
commit 573d03c7d7

@ -616,6 +616,19 @@ cleanup.deletionPublishedNews = true
javastart_Xmx=Xmx180m
javastart_Xms=Xms180m
# YaCy is able to use RAM copies of database tables. This needs a lot of RAM
# To switch copying of file tables into RAM on, use this property
# this value is automatically set to true if more than one gigabyte of RAM is available
ramcopy=false
# some java versions may be limited to a specific array size
# of 134217727 entries. To prevent tables of that size from being generated,
# set this property to false
# If you want to have better performance and switch ramcopy on, try also to
# set this property to true
# this value is automatically set to true if more than two gigabytes of RAM are available
exceed134217727=false
# priority of the yacy-process
# is valid in unix/shell and windows environments but
# not for first startup of YaCy
@ -625,8 +638,8 @@ javastart_priority=10
# performance properties for the word index cache
# wordCacheMaxLow/High is the number of word indexes that shall be held in the
# ram cache during indexing. When YaCy is shut down, this cache must be
# flushed to disc; this may last some minutes.
# ram cache during indexing. If you want to increase indexing speed, increase this
# value i.e. up to one million, but increase also the memory limit to a minimum of 2GB
wordCacheMaxCount = 100000
# Specifies if yacy can be used as transparent http proxy.

@ -196,10 +196,10 @@ public class dbtest {
}
if (dbe.equals("kelondroSplitTable")) {
final File tablepath = new File(tablename).getParentFile();
return new SplitTable(tablepath, new File(tablename).getName(), testRow);
return new SplitTable(tablepath, new File(tablename).getName(), testRow, true, true);
}
if (dbe.equals("kelondroEcoTable")) {
return new Table(new File(tablename), testRow, Table.tailCacheForceUsage, 1000, 0);
return new Table(new File(tablename), testRow, 1000, 0, true, true);
}
if (dbe.equals("mysql")) {
return new SQLTable("mysql", testRow);

@ -56,8 +56,13 @@ public class Balancer {
private long minimumGlobalDelta;
private long lastDomainStackFill;
public Balancer(final File cachePath, final String stackname, final boolean fullram,
final long minimumLocalDelta, final long minimumGlobalDelta) {
public Balancer(
final File cachePath,
final String stackname,
final long minimumLocalDelta,
final long minimumGlobalDelta,
final boolean useTailCache,
final boolean exceed134217727) {
this.cacheStacksPath = cachePath;
this.domainStacks = new ConcurrentHashMap<String, LinkedList<String>>();
this.top = new ConcurrentLinkedQueue<String>();
@ -69,7 +74,7 @@ public class Balancer {
if (!(cachePath.exists())) cachePath.mkdir(); // make the path
cacheStacksPath.mkdirs();
File f = new File(cacheStacksPath, stackname + indexSuffix);
urlFileIndex = new Table(f, Request.rowdef, (fullram) ? Table.tailCacheUsageAuto : Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
urlFileIndex = new Table(f, Request.rowdef, EcoFSBufferSize, 0, useTailCache, exceed134217727);
lastDomainStackFill = 0;
Log.logInfo("Balancer", "opened balancer file with " + urlFileIndex.size() + " entries from " + f.toString());
}

@ -71,7 +71,7 @@ public class CrawlQueues {
// start crawling management
log.logConfig("Starting Crawling Management");
noticeURL = new NoticedURL(queuePath);
noticeURL = new NoticedURL(queuePath, sb.useTailCache, sb.exceed134217727);
//errorURL = new plasmaCrawlZURL(); // fresh error DB each startup; can be hold in RAM and reduces IO;
final File errorDBFile = new File(queuePath, "urlError2.db");
if (errorDBFile.exists()) {
@ -79,8 +79,8 @@ public class CrawlQueues {
// this is useful because there is currently no re-use of the data in this table.
if (errorDBFile.isDirectory()) SplitTable.delete(queuePath, "urlError2.db"); else FileUtils.deletedelete(errorDBFile);
}
errorURL = new ZURL(queuePath, "urlError3.db", false);
delegatedURL = new ZURL(queuePath, "urlDelegated3.db", true);
errorURL = new ZURL(queuePath, "urlError3.db", false, sb.useTailCache, sb.exceed134217727);
delegatedURL = new ZURL(queuePath, "urlDelegated3.db", true, sb.useTailCache, sb.exceed134217727);
}
public void relocate(final File newQueuePath) {
@ -89,13 +89,13 @@ public class CrawlQueues {
this.workers = new ConcurrentHashMap<Integer, crawlWorker>();
this.remoteCrawlProviderHashes.clear();
noticeURL = new NoticedURL(newQueuePath);
noticeURL = new NoticedURL(newQueuePath, sb.useTailCache, sb.exceed134217727);
final File errorDBFile = new File(newQueuePath, "urlError2.db");
if (errorDBFile.exists()) {
if (errorDBFile.isDirectory()) SplitTable.delete(newQueuePath, "urlError2.db"); else FileUtils.deletedelete(errorDBFile);
}
errorURL = new ZURL(newQueuePath, "urlError3.db", false);
delegatedURL = new ZURL(newQueuePath, "urlDelegated3.db", true);
errorURL = new ZURL(newQueuePath, "urlError3.db", false, sb.useTailCache, sb.exceed134217727);
delegatedURL = new ZURL(newQueuePath, "urlDelegated3.db", true, sb.useTailCache, sb.exceed134217727);
}
public void close() {

@ -69,7 +69,7 @@ public class NoticeURLImporter extends AbstractImporter implements Importer {
// init noticeUrlDB
this.log.logInfo("Initializing the source noticeUrlDB");
this.importNurlDB = new NoticedURL(plasmaPath);
this.importNurlDB = new NoticedURL(plasmaPath, false, false);
this.importStartSize = this.importNurlDB.size();
//int stackSize = this.importNurlDB.stackSize();

@ -51,12 +51,15 @@ public class NoticedURL {
private Balancer limitStack; // links found by crawling at target depth
private Balancer remoteStack; // links from remote crawl orders
public NoticedURL(final File cachePath) {
public NoticedURL(
final File cachePath,
final boolean useTailCache,
final boolean exceed134217727) {
Log.logInfo("NoticedURL", "CREATING STACKS at " + cachePath.toString());
this.coreStack = new Balancer(cachePath, "urlNoticeCoreStack", false, minimumLocalDeltaInit, minimumGlobalDeltaInit);
this.limitStack = new Balancer(cachePath, "urlNoticeLimitStack", false, minimumLocalDeltaInit, minimumGlobalDeltaInit);
this.coreStack = new Balancer(cachePath, "urlNoticeCoreStack", minimumLocalDeltaInit, minimumGlobalDeltaInit, useTailCache, exceed134217727);
this.limitStack = new Balancer(cachePath, "urlNoticeLimitStack", minimumLocalDeltaInit, minimumGlobalDeltaInit, useTailCache, exceed134217727);
//overhangStack = new plasmaCrawlBalancer(overhangStackFile);
this.remoteStack = new Balancer(cachePath, "urlNoticeRemoteStack", false, minimumLocalDeltaInit, minimumGlobalDeltaInit);
this.remoteStack = new Balancer(cachePath, "urlNoticeRemoteStack", minimumLocalDeltaInit, minimumGlobalDeltaInit, useTailCache, exceed134217727);
}
public long getMinimumLocalDelta() {

@ -62,7 +62,12 @@ public class ZURL {
protected final ObjectIndex urlIndex;
private final LinkedList<String> stack;
public ZURL(final File cachePath, final String tablename, final boolean startWithEmptyFile) {
public ZURL(
final File cachePath,
final String tablename,
final boolean startWithEmptyFile,
final boolean useTailCache,
final boolean exceed134217727) {
// creates a new ZURL in a file
cachePath.mkdirs();
final File f = new File(cachePath, tablename);
@ -71,7 +76,7 @@ public class ZURL {
if (f.isDirectory()) SplitTable.delete(cachePath, tablename); else FileUtils.deletedelete(f);
}
}
this.urlIndex = new Table(f, rowdef, Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
this.urlIndex = new Table(f, rowdef, EcoFSBufferSize, 0, useTailCache, exceed134217727);
//urlIndex = new kelondroFlexTable(cachePath, tablename, -1, rowdef, 0, true);
this.stack = new LinkedList<String>();
}

@ -412,7 +412,7 @@ public class URLAnalysis {
public static int diffurlcol(String metadataPath, String statisticFile, String diffFile) throws IOException {
System.out.println("INDEX DIFF URL-COL startup");
HandleMap idx = new HandleMap(URLMetadataRow.rowdef.primaryKeyLength, URLMetadataRow.rowdef.objectOrder, 4, new File(statisticFile), 0);
MetadataRepository mr = new MetadataRepository(new File(metadataPath));
MetadataRepository mr = new MetadataRepository(new File(metadataPath), false, false);
HandleSet hs = new HandleSet(URLMetadataRow.rowdef.primaryKeyLength, URLMetadataRow.rowdef.objectOrder, 0, 1000000);
System.out.println("INDEX DIFF URL-COL loaded dump, starting diff");
long start = System.currentTimeMillis();
@ -439,7 +439,7 @@ public class URLAnalysis {
public static void export(String metadataPath, int format, String export, String diffFile) throws IOException {
// format: 0=text, 1=html, 2=rss/xml
System.out.println("URL EXPORT startup");
MetadataRepository mr = new MetadataRepository(new File(metadataPath));
MetadataRepository mr = new MetadataRepository(new File(metadataPath), false, false);
HandleSet hs = (diffFile == null) ? null : new HandleSet(URLMetadataRow.rowdef.primaryKeyLength, URLMetadataRow.rowdef.objectOrder, new File(diffFile), 0);
System.out.println("URL EXPORT loaded dump, starting export");
Export e = mr.export(new File(export), ".*", hs, format, false);
@ -453,7 +453,7 @@ public class URLAnalysis {
public static void delete(String metadataPath, String diffFile) throws IOException {
System.out.println("URL DELETE startup");
MetadataRepository mr = new MetadataRepository(new File(metadataPath));
MetadataRepository mr = new MetadataRepository(new File(metadataPath), false, false);
int mrSize = mr.size();
HandleSet hs = new HandleSet(URLMetadataRow.rowdef.primaryKeyLength, URLMetadataRow.rowdef.objectOrder, new File(diffFile), 0);
System.out.println("URL DELETE loaded dump, starting deletion of " + hs.size() + " entries from " + mrSize);

@ -38,9 +38,16 @@ public class Relations {
private final File baseDir;
private HashMap<String, ObjectIndex> relations;
private final boolean useTailCache;
private final boolean exceed134217727;
public Relations(final File location) {
public Relations(
final File location,
final boolean useTailCache,
final boolean exceed134217727) {
this.baseDir = location;
this.useTailCache = useTailCache;
this.exceed134217727 = exceed134217727;
}
private static Row rowdef(String filename) {
@ -81,14 +88,14 @@ public class Relations {
if (!list[i].equals(targetfilename)) continue;
final Row row = rowdef(list[i]);
if (row.primaryKeyLength != keysize || row.column(1).cellwidth != payloadsize) continue; // a wrong table
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, Table.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, 1024*1024, 0, this.useTailCache, this.exceed134217727);
relations.put(name, table);
return;
}
}
// the relation does not exist, create it
final Row row = rowdef(keysize, payloadsize);
final ObjectIndex table = new Table(new File(baseDir, targetfilename), row, Table.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, targetfilename), row, 1024*1024, 0, this.useTailCache, this.exceed134217727);
relations.put(name, table);
}
@ -101,7 +108,7 @@ public class Relations {
for (int i = 0; i < list.length; i++) {
if (list[i].startsWith(name)) {
final Row row = rowdef(list[i]);
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, Table.tailCacheUsageAuto, 1024*1024, 0);
final ObjectIndex table = new Table(new File(baseDir, list[i]), row, 1024*1024, 0, this.useTailCache, this.exceed134217727);
relations.put(name, table);
return table;
}
@ -158,7 +165,7 @@ public class Relations {
}
public static void main(final String args[]) {
final Relations r = new Relations(new File("/Users/admin/"));
final Relations r = new Relations(new File("/Users/admin/"), true, true);
try {
final String table1 = "test1";
r.declareRelation(table1, 12, 30);

@ -84,12 +84,16 @@ public class SplitTable implements ObjectIndex {
private String current;
private long fileAgeLimit;
private long fileSizeLimit;
private boolean useTailCache;
private boolean exceed134217727;
public SplitTable(
final File path,
final String tablename,
final Row rowdef) {
this(path, tablename, rowdef, ArrayStack.oneMonth, (long) Integer.MAX_VALUE);
final Row rowdef,
final boolean useTailCache,
final boolean exceed134217727) {
this(path, tablename, rowdef, ArrayStack.oneMonth, (long) Integer.MAX_VALUE, useTailCache, exceed134217727);
}
public SplitTable(
@ -97,12 +101,16 @@ public class SplitTable implements ObjectIndex {
final String tablename,
final Row rowdef,
final long fileAgeLimit,
final long fileSizeLimit) {
final long fileSizeLimit,
final boolean useTailCache,
final boolean exceed134217727) {
this.path = path;
this.prefix = tablename;
this.rowdef = rowdef;
this.fileAgeLimit = fileAgeLimit;
this.fileSizeLimit = fileSizeLimit;
this.useTailCache = useTailCache;
this.exceed134217727 = exceed134217727;
this.entryOrder = new Row.EntryComparator(rowdef.objectOrder);
init();
}
@ -187,7 +195,7 @@ public class SplitTable implements ObjectIndex {
if (maxf != null) {
f = new File(path, maxf);
Log.logInfo("kelondroSplitTable", "opening partial eco table " + f);
table = new Table(f, rowdef, Table.tailCacheUsageAuto, EcoFSBufferSize, 0);
table = new Table(f, rowdef, EcoFSBufferSize, 0, this.useTailCache, this.exceed134217727);
tables.put(maxf, table);
}
}
@ -267,7 +275,7 @@ public class SplitTable implements ObjectIndex {
private ObjectIndex newTable() {
this.current = newFilename();
final File f = new File(path, this.current);
Table table = new Table(f, rowdef, Table.tailCacheDenyUsage, EcoFSBufferSize, 0);
Table table = new Table(f, rowdef, EcoFSBufferSize, 0, this.useTailCache, this.exceed134217727);
tables.put(this.current, table);
return table;
}

@ -69,10 +69,6 @@ public class Table implements ObjectIndex {
// static tracker objects
private static TreeMap<String, Table> tableTracker = new TreeMap<String, Table>();
public static final int tailCacheDenyUsage = 0;
public static final int tailCacheForceUsage = 1;
public static final int tailCacheUsageAuto = 2;
public static final long maxarraylength = 134217727L; // that may be the maximum size of array length in some JVMs
private static final long minmemremaining = 20 * 1024 * 1024; // if less than this memory is remaining, the memory copy of a table is abandoned
private int fail;
@ -84,7 +80,13 @@ public class Table implements ObjectIndex {
protected RowSet table;
protected Row taildef;
public Table(final File tablefile, final Row rowdef, final int useTailCache, final int buffersize, final int initialSpace) {
public Table(
final File tablefile,
final Row rowdef,
final int buffersize,
final int initialSpace,
final boolean useTailCache,
final boolean exceed134217727) {
this.tablefile = tablefile;
this.rowdef = rowdef;
this.buffersize = buffersize;
@ -118,9 +120,8 @@ public class Table implements ObjectIndex {
// initialize index and copy table
final int records = Math.max(fileSize, initialSpace);
final long neededRAM4table = (records) * ((rowdef.objectsize) + 4L) * 3L;
table = (/*(neededRAM4table < maxarraylength) &&*/
((useTailCache == tailCacheForceUsage) ||
((useTailCache == tailCacheUsageAuto) && (MemoryControl.available() > neededRAM4table + 200 * 1024 * 1024)))) ?
table = ((exceed134217727 || neededRAM4table < maxarraylength) &&
(useTailCache && MemoryControl.available() > neededRAM4table + 200 * 1024 * 1024)) ?
new RowSet(taildef, records) : null;
Log.logInfo("TABLE", "initialization of " + tablefile.getName() + ". table copy: " + ((table == null) ? "no" : "yes") + ", available RAM: " + (MemoryControl.available() / 1024 / 1024) + "MB, needed: " + (neededRAM4table/1024/1024 + 200) + "MB, allocating space for " + records + " entries");
final long neededRAM4index = 2 * 1024 * 1024 + records * (rowdef.primaryKeyLength + 4) * 3 / 2;
@ -268,7 +269,7 @@ public class Table implements ObjectIndex {
map.put("tableKeyChunkSize", Integer.toString(index.row().objectsize));
map.put("tableKeyMem", Integer.toString((int) (((long) index.row().objectsize) * ((long) index.size()) * RowCollection.growfactor100 / 100L)));
map.put("tableValueChunkSize", (table == null) ? "0" : Integer.toString(table.row().objectsize));
map.put("tableValueMem", (table == null) ? "0" : Integer.toString((int) (((long) index.row().objectsize) * ((long) index.size()) * RowCollection.growfactor100 / 100L)));
map.put("tableValueMem", (table == null) ? "0" : Integer.toString((int) (((long) table.row().objectsize) * ((long) table.size()) * RowCollection.growfactor100 / 100L)));
return map;
}
@ -747,10 +748,10 @@ public class Table implements ObjectIndex {
return result;
}
private static ObjectIndex testTable(final File f, final String testentities, final int testcase) throws IOException {
private static ObjectIndex testTable(final File f, final String testentities, final boolean useTailCache, final boolean exceed134217727) throws IOException {
if (f.exists()) FileUtils.deletedelete(f);
final Row rowdef = new Row("byte[] a-4, byte[] b-4", NaturalOrder.naturalOrder);
final ObjectIndex tt = new Table(f, rowdef, testcase, 100, 0);
final ObjectIndex tt = new Table(f, rowdef, 100, 0, useTailCache, exceed134217727);
byte[] b;
final Row.Entry row = rowdef.newEntry();
for (int i = 0; i < testentities.length(); i++) {
@ -779,7 +780,7 @@ public class Table implements ObjectIndex {
return count;
}
public static void bigtest(final int elements, final File testFile, final int testcase) {
public static void bigtest(final int elements, final File testFile, final boolean useTailCache, final boolean exceed134217727) {
System.out.println("starting big test with " + elements + " elements:");
final long start = System.currentTimeMillis();
final String[] s = permutations(elements);
@ -788,13 +789,13 @@ public class Table implements ObjectIndex {
for (int i = 0; i < s.length; i++) {
System.out.println("*** probing tree " + i + " for permutation " + s[i]);
// generate tree and delete elements
tt = testTable(testFile, s[i], testcase);
tt = testTable(testFile, s[i], useTailCache, exceed134217727);
if (countElements(tt) != tt.size()) {
System.out.println("wrong size for " + s[i]);
}
tt.close();
for (int j = 0; j < s.length; j++) {
tt = testTable(testFile, s[i], testcase);
tt = testTable(testFile, s[i], useTailCache, exceed134217727);
// delete by permutation j
for (int elt = 0; elt < s[j].length(); elt++) {
tt.remove(testWord(s[j].charAt(elt)));
@ -830,9 +831,9 @@ public class Table implements ObjectIndex {
// open a file, add one entry and exit
final File f = new File(args[0]);
System.out.println("========= Testcase: no tail cache:");
bigtest(5, f, tailCacheDenyUsage);
bigtest(5, f, false, false);
System.out.println("========= Testcase: with tail cache:");
bigtest(5, f, tailCacheForceUsage);
bigtest(5, f, true, true);
/*
kelondroRow row = new kelondroRow("byte[] key-4, byte[] x-5", kelondroNaturalOrder.naturalOrder, 0);
try {

@ -63,9 +63,12 @@ public final class MetadataRepository implements Iterable<byte[]> {
private File location;
private ArrayList<hostStat> statsDump;
public MetadataRepository(final File path) {
public MetadataRepository(
final File path,
final boolean useTailCache,
final boolean exceed134217727) {
this.location = path;
this.urlIndexFile = new Cache(new SplitTable(this.location, "urls", URLMetadataRow.rowdef));
this.urlIndexFile = new Cache(new SplitTable(this.location, "urls", URLMetadataRow.rowdef, useTailCache, exceed134217727));
this.exportthread = null; // will have a export thread assigned if exporter is running
this.statsDump = null;
}

@ -79,7 +79,9 @@ public final class Segment {
final Log log,
final File segmentPath,
final int entityCacheMaxSize,
final long maxFileSize) throws IOException {
final long maxFileSize,
final boolean useTailCache,
final boolean exceed134217727) throws IOException {
log.logInfo("Initializing Segment '" + segmentPath + "', word hash cache size is " + Word.hashCacheSize + ".");
@ -114,7 +116,7 @@ public final class Segment {
if (!metadatadir.exists()) metadatadir.mkdirs();
// create LURL-db
urlMetadata = new MetadataRepository(metadatadir);
urlMetadata = new MetadataRepository(metadatadir, useTailCache, exceed134217727);
}
public MetadataRepository urlMetadata() {

@ -272,7 +272,8 @@ public final class Switchboard extends serverAbstractSwitch implements serverSwi
public serverProcessor<indexingQueueEntry> indexingStorageProcessor;
public RobotsTxtConfig robotstxtConfig = null;
public boolean useTailCache;
public boolean exceed134217727;
private final serverSemaphore shutdownSync = new serverSemaphore(0);
private boolean terminate = false;
@ -303,6 +304,12 @@ public final class Switchboard extends serverAbstractSwitch implements serverSwi
// remote proxy configuration
RemoteProxyConfig.init(this);
// memory configuration
this.useTailCache = getConfigBool("ramcopy", true);
if (MemoryControl.available() > 1024 * 1024 * 1024 * 1) this.useTailCache = true;
this.exceed134217727 = getConfigBool("exceed134217727", true);
if (MemoryControl.available() > 1024 * 1024 * 1024 * 2) this.exceed134217727 = true;
// load values from configs
final File indexPath = getConfigPath(SwitchboardConstants.INDEX_PRIMARY_PATH, SwitchboardConstants.INDEX_PATH_DEFAULT);
this.log.logConfig("Index Primary Path: " + indexPath.toString());
@ -353,12 +360,16 @@ public final class Switchboard extends serverAbstractSwitch implements serverSwi
"seed.pot.heap",
mySeedFile,
redundancy,
partitionExponent);
partitionExponent,
this.useTailCache,
this.exceed134217727);
indexSegment = new Segment(
log,
new File(new File(indexPath, networkName), "TEXT"),
wordCacheMaxCount,
fileSizeMax);
fileSizeMax,
this.useTailCache,
this.exceed134217727);
crawler = new CrawlSwitchboard(
peers,
networkName,
@ -842,13 +853,17 @@ public final class Switchboard extends serverAbstractSwitch implements serverSwi
"seed.pot.heap",
mySeedFile,
redundancy,
partitionExponent);
partitionExponent,
this.useTailCache,
this.exceed134217727);
try {
indexSegment = new Segment(
log,
new File(new File(indexPrimaryPath, networkName), "TEXT"),
wordCacheMaxCount,
fileSizeMax);
fileSizeMax,
this.useTailCache,
this.exceed134217727);
} catch (IOException e) {
e.printStackTrace();
}

@ -222,7 +222,14 @@ public class CRProcess {
return true;
}
public static void accumulate(final File from_dir, final File tmp_dir, final File err_dir, final File bkp_dir, final File to_file, int max_files, final boolean newdb) throws IOException {
public static void accumulate(
final File from_dir,
final File tmp_dir,
final File err_dir,
final File bkp_dir,
final File to_file,
int max_files,
final boolean newdb) throws IOException {
if (!(from_dir.isDirectory())) {
System.out.println("source path " + from_dir + " is not a directory.");
return;
@ -246,7 +253,7 @@ public class CRProcess {
IndexCell<WordReference> newseq = null;
if (newdb) {
final File path = to_file.getParentFile(); // path to storage place
newacc = new Table(new File(path, CRG_accname), CRG_accrow, Table.tailCacheUsageAuto, 0, 0);
newacc = new Table(new File(path, CRG_accname), CRG_accrow, 0, 0, true, false);
newseq = new IndexCell<WordReference>(
path,
Segment.wordReferenceFactory,

@ -63,16 +63,19 @@ public class yacyNewsDB {
private final File path;
protected ObjectIndex news;
public yacyNewsDB(final File path) {
public yacyNewsDB(
final File path,
final boolean useTailCache,
final boolean exceed134217727) {
this.path = path;
this.news = new Table(path, yacyNewsRecord.rowdef, Table.tailCacheUsageAuto, 10, 0);
this.news = new Table(path, yacyNewsRecord.rowdef, 10, 0, useTailCache, exceed134217727);
//this.news = new kelondroCache(kelondroTree.open(path, true, preloadTime, yacyNewsRecord.rowdef));
}
private void resetDB() {
try {close();} catch (final Exception e) {}
if (path.exists()) FileUtils.deletedelete(path);
this.news = new Table(path, yacyNewsRecord.rowdef, Table.tailCacheUsageAuto, 10, 0);
this.news = new Table(path, yacyNewsRecord.rowdef, 10, 0, false, false);
}
public void close() {

@ -263,8 +263,11 @@ public class yacyNewsPool {
private final yacyNewsQueue outgoingNews, publishedNews, incomingNews, processedNews;
private final int maxDistribution;
public yacyNewsPool(final File yacyDBPath) {
newsDB = new yacyNewsDB(new File(yacyDBPath, "news.db"));
public yacyNewsPool(
final File yacyDBPath,
final boolean useTailCache,
final boolean exceed134217727) {
newsDB = new yacyNewsDB(new File(yacyDBPath, "news.db"), useTailCache, exceed134217727);
outgoingNews = new yacyNewsQueue(new File(yacyDBPath, "newsOut.stack"), newsDB);
publishedNews = new yacyNewsQueue(new File(yacyDBPath, "newsPublished.stack"), newsDB);
incomingNews = new yacyNewsQueue(new File(yacyDBPath, "newsIn.stack"), newsDB);

@ -109,7 +109,9 @@ public final class yacySeedDB implements AlternativeDomainNames {
final String seedPotentialDBFileName,
final File myOwnSeedFile,
final int redundancy,
final int partitionExponent) {
final int partitionExponent,
final boolean useTailCache,
final boolean exceed134217727) {
this.seedActiveDBFile = new File(networkRoot, seedActiveDBFileName);
this.seedPassiveDBFile = new File(networkRoot, seedPassiveDBFileName);
this.seedPotentialDBFile = new File(networkRoot, seedPotentialDBFileName);
@ -138,7 +140,7 @@ public final class yacySeedDB implements AlternativeDomainNames {
HTTPDemon.setAlternativeResolver(this);
// create or init news database
this.newsPool = new yacyNewsPool(networkRoot);
this.newsPool = new yacyNewsPool(networkRoot, useTailCache, exceed134217727);
// deploy peer actions
this.peerActions = new yacyPeerActions(this, newsPool);

@ -649,10 +649,10 @@ public final class yacy {
log.logInfo("STARTING URL CLEANUP");
// db containing all currently loades urls
final MetadataRepository currentUrlDB = new MetadataRepository(new File(new File(indexPrimaryRoot, networkName), "TEXT"));
final MetadataRepository currentUrlDB = new MetadataRepository(new File(new File(indexPrimaryRoot, networkName), "TEXT"), false, false);
// db used to hold all neede urls
final MetadataRepository minimizedUrlDB = new MetadataRepository(new File(new File(indexRoot2, networkName), "TEXT"));
final MetadataRepository minimizedUrlDB = new MetadataRepository(new File(new File(indexRoot2, networkName), "TEXT"), false, false);
final int cacheMem = (int)(MemoryControl.maxMemory - MemoryControl.total());
if (cacheMem < 2048000) throw new OutOfMemoryError("Not enough memory available to start clean up.");
@ -661,7 +661,7 @@ public final class yacy {
log,
new File(new File(indexPrimaryRoot, "freeworld"), "TEXT"),
10000,
(long) Integer.MAX_VALUE);
(long) Integer.MAX_VALUE, false, false);
final Iterator<ReferenceContainer<WordReference>> indexContainerIterator = wordIndex.termIndex().references("AAAAAAAAAAAA".getBytes(), false, false);
long urlCounter = 0, wordCounter = 0;
@ -835,7 +835,7 @@ public final class yacy {
final File root = homePath;
final File indexroot = new File(root, "DATA/INDEX");
try {Log.configureLogging(homePath, new File(homePath, "DATA/LOG/yacy.logging"));} catch (final Exception e) {}
final MetadataRepository currentUrlDB = new MetadataRepository(new File(new File(indexroot, networkName), "TEXT"));
final MetadataRepository currentUrlDB = new MetadataRepository(new File(new File(indexroot, networkName), "TEXT"), false, false);
currentUrlDB.deadlinkCleaner(null);
currentUrlDB.close();
}
@ -855,7 +855,7 @@ public final class yacy {
log,
new File(new File(indexPrimaryRoot, "freeworld"), "TEXT"),
10000,
(long) Integer.MAX_VALUE);
(long) Integer.MAX_VALUE, false, false);
indexContainerIterator = WordIndex.termIndex().references(wordChunkStartHash.getBytes(), false, false);
}
int counter = 0;

Loading…
Cancel
Save