- changed initialization order to prefer allocation of memory for table files first
- bugfixes in memory amount calculation
Michael Peter Christen 13 years ago
parent 0746308bc2
commit 2280a7b276
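
For readers of the hunks below: a minimal, self-contained sketch of the corrected size estimate. The 200 MB headroom, the 3/2 growth factor, and the per-entry terms come from the changed lines; reading the +4L as the index's per-entry int handle is an assumption.

final class RamEstimate {
    static final long HEADROOM = 200L * 1024L * 1024L; // fixed 200 MB safety margin

    // tailSize stands for taildef.objectsize (row payload without the key),
    // keySize for rowdef.primaryKeyLength; the +4 models an int handle per entry
    static long neededForTable(long records, long tailSize, long keySize) {
        return HEADROOM + records * (tailSize + keySize + 4L) * 3L / 2L;
    }

    static long neededForIndex(long records, long keySize) {
        return HEADROOM + records * (keySize + 4L) * 3L / 2L;
    }

    public static void main(String[] args) {
        // hypothetical numbers: 10 million records, 64-byte tail, 12-byte key
        System.out.println(neededForTable(10000000L, 64L, 12L) / (1024L * 1024L) + " MB");
        System.out.println(neededForIndex(10000000L, 12L) / (1024L * 1024L) + " MB");
    }
}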

@@ -71,7 +71,7 @@ public class Table implements Index, Iterable<Row.Entry> {
// static tracker objects
private final static TreeMap<String, Table> tableTracker = new TreeMap<String, Table>();
- private final static long maxarraylength = 134217727L; // that may be the maximum size of array length in some JVMs
+ private final static long maxarraylength = 134217727L; // (2^27-1) that may be the maximum size of array length in some JVMs
private final long minmemremaining; // if less than this memory is remaininig, the memory copy of a table is abandoned
private final int buffersize;
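
A note on the constant documented in this hunk: a hedged sketch of how such a cap can guard the in-memory copy. The exceed134217727 flag and the fall-back-to-null behaviour appear in the next hunk; the allocation details here are illustrative.

// only attempt the large in-memory copy when the estimate stays below the
// conservative 2^27-1 cap, unless exceed134217727 explicitly allows more;
// some JVMs fail array allocations well below Integer.MAX_VALUE, hence the margin
static byte[] tryTableCopy(long neededBytes, boolean exceed134217727) {
    final long maxarraylength = 134217727L; // 2^27 - 1
    if (!exceed134217727 && neededBytes >= maxarraylength) return null;
    try {
        return new byte[(int) Math.min(neededBytes, maxarraylength)];
    } catch (final OutOfMemoryError e) {
        return null; // fall back to file-backed access only
    }
}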
@@ -122,7 +122,7 @@ public class Table implements Index, Iterable<Row.Entry> {
// initialize index and copy table
final int records = Math.max(fileSize, initialSpace);
- final long neededRAM4table = 200L * 1024L * 1024L + records * (rowdef.objectsize + rowdef.primaryKeyLength) * 3L / 2L;
+ final long neededRAM4table = 200L * 1024L * 1024L + records * (this.taildef.objectsize + rowdef.primaryKeyLength + 4L) * 3L / 2L;
this.table = null;
try {
this.table = ((exceed134217727 || neededRAM4table < maxarraylength) &&
@@ -132,13 +132,13 @@ public class Table implements Index, Iterable<Row.Entry> {
} catch (Throwable e) {
this.table = null;
}
Log.logInfo("TABLE", "initialization of " + tablefile.getName() + ". table copy: " + ((this.table == null) ? "no" : "yes") + ", available RAM: " + (MemoryControl.available() / 1024 / 1024) + "MB, needed: " + (neededRAM4table/1024/1024 + 200) + "MB, allocating space for " + records + " entries");
final long neededRAM4index = 200L * 1024L * 1024L + records * rowdef.primaryKeyLength * 3L / 2L;
Log.logInfo("TABLE", "initialization of " + tablefile.getName() + ". table copy: " + ((this.table == null) ? "no" : "yes") + ", available RAM: " + (MemoryControl.available() / 1024L / 1024L) + "MB, needed: " + (neededRAM4table / 1024L / 1024L) + "MB, allocating space for " + records + " entries");
final long neededRAM4index = 200L * 1024L * 1024L + records * (rowdef.primaryKeyLength + 4L) * 3L / 2L;
if (!MemoryControl.request(neededRAM4index, true)) {
// despite calculations seemed to show that there is enough memory for the table AND the index
// there is now not enough memory left for the index. So delete the table again to free the memory
// for the index
Log.logSevere("TABLE", tablefile.getName() + ": not enough RAM (" + (MemoryControl.available() / 1024L / 1024L) + "MB) left for index, deleting allocated table space to enable index space allocation (needed: " + (neededRAM4index / 1024 / 1024) + "MB)");
Log.logSevere("TABLE", tablefile.getName() + ": not enough RAM (" + (MemoryControl.available() / 1024L / 1024L) + "MB) left for index, deleting allocated table space to enable index space allocation (needed: " + (neededRAM4index / 1024L / 1024L) + "MB)");
this.table = null; System.gc();
Log.logSevere("TABLE", tablefile.getName() + ": RAM after releasing the table: " + (MemoryControl.available() / 1024L / 1024L) + "MB");
}
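
This hunk is the heart of the commit message: the optional table copy is allocated first and abandoned again if the mandatory index no longer fits. A runnable sketch of that order, with freeMemory() as a crude stand-in for YaCy's MemoryControl:

final class InitOrder {
    // stand-in for MemoryControl.request(bytes, force); the real class also
    // considers total heap growth and can trigger a GC before answering
    static boolean request(long bytes) {
        return Runtime.getRuntime().freeMemory() >= bytes;
    }

    public static void main(String[] args) {
        final long neededRAM4table = 300L * 1024L * 1024L;
        final long neededRAM4index = 250L * 1024L * 1024L;
        // 1) optimistic: allocate the optional table copy first (small demo array)
        byte[] tableCopy = request(neededRAM4table) ? new byte[64 * 1024 * 1024] : null;
        // 2) verify the mandatory index still fits; if not, drop the copy and GC
        if (tableCopy != null && !request(neededRAM4index)) {
            tableCopy = null;
            System.gc(); // encourage prompt reclamation of the large array
        }
        System.out.println("table copy: " + (tableCopy == null ? "no" : "yes"));
    }
}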
@@ -714,6 +714,7 @@ public class Table implements Index, Iterable<Row.Entry> {
return remove(key) != null;
}
+ @Override
public synchronized Entry remove(final byte[] key) throws IOException {
assert this.file.size() == this.index.size() : "file.size() = " + this.file.size() + ", index.size() = " + this.index.size();
assert this.table == null || this.table.size() == this.index.size() : "table.size() = " + this.table.size() + ", index.size() = " + this.index.size();

@@ -370,18 +370,6 @@ public final class Switchboard extends serverSwitch
this.queuesRoot = new File(new File(indexPath, networkName), "QUEUES");
this.networkRoot.mkdirs();
this.queuesRoot.mkdirs();
- final File mySeedFile = new File(this.networkRoot, SeedDB.DBFILE_OWN_SEED);
- this.peers =
- new SeedDB(
- this.networkRoot,
- "seed.new.heap",
- "seed.old.heap",
- "seed.pot.heap",
- mySeedFile,
- redundancy,
- partitionExponent,
- false,
- this.exceed134217727);
// initialize index
ReferenceContainer.maxReferences = getConfigInt("index.maxReferences", 0);
@@ -397,6 +385,50 @@ public final class Switchboard extends serverSwitch
// set the default segment names
setDefaultSegments();
+ // prepare a solr index profile switch list
+ final File solrBackupProfile = new File("defaults/solr.keys.list");
+ final String schemename =
+ getConfig("federated.service.solr.indexing.schemefile", "solr.keys.default.list");
+ final File solrWorkProfile = new File(getDataPath(), "DATA/SETTINGS/" + schemename);
+ if ( !solrWorkProfile.exists() ) {
+ FileUtils.copy(solrBackupProfile, solrWorkProfile);
+ }
+ final SolrConfiguration backupScheme = new SolrConfiguration(solrBackupProfile);
+ this.solrScheme = new SolrConfiguration(solrWorkProfile);
+ // update the working scheme with the backup scheme. This is necessary to include new features.
+ // new features are always activated by default (if activated in input-backupScheme)
+ this.solrScheme.fill(backupScheme, true);
+ // set up the solr interface
+ final String solrurls = getConfig("federated.service.solr.indexing.url", "http://127.0.0.1:8983/solr");
+ final boolean usesolr = getConfigBool("federated.service.solr.indexing.enabled", false) & solrurls.length() > 0;
+ try {
+ this.indexSegments.segment(Segments.Process.LOCALCRAWLING).connectSolr(
+ (usesolr) ? new SolrShardingConnector(
+ solrurls,
+ SolrShardingSelection.Method.MODULO_HOST_MD5,
+ 10000, true) : null);
+ } catch ( final IOException e ) {
+ Log.logException(e);
+ this.indexSegments.segment(Segments.Process.LOCALCRAWLING).connectSolr(null);
+ }
+ // initialize network database
+ final File mySeedFile = new File(this.networkRoot, SeedDB.DBFILE_OWN_SEED);
+ this.peers =
+ new SeedDB(
+ this.networkRoot,
+ "seed.new.heap",
+ "seed.old.heap",
+ "seed.pot.heap",
+ mySeedFile,
+ redundancy,
+ partitionExponent,
+ false,
+ this.exceed134217727);
// load domainList
try {
this.domainList = null;
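
The "update the working scheme with the backup scheme" comment in this hunk describes a one-way merge: fields shipped in newer defaults become active without clobbering user edits. SolrConfiguration's real fill() API is not visible in this diff, so the following is only a plausible sketch of that semantics:

import java.util.LinkedHashMap;
import java.util.Map;

final class SchemeFill {
    // copy entries that exist in the backup (default) profile but are missing
    // from the working profile: new fields become active, user edits survive
    static void fill(Map<String, String> work, Map<String, String> backup) {
        for (final Map.Entry<String, String> e : backup.entrySet()) {
            if (!work.containsKey(e.getKey())) work.put(e.getKey(), e.getValue());
        }
    }

    public static void main(String[] args) {
        final Map<String, String> work = new LinkedHashMap<String, String>();
        work.put("title", "user-tuned");
        final Map<String, String> backup = new LinkedHashMap<String, String>();
        backup.put("title", "default");
        backup.put("new_field", "default"); // a newly shipped feature
        fill(work, backup);
        System.out.println(work); // {title=user-tuned, new_field=default}
    }
}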
@@ -634,36 +666,6 @@ public final class Switchboard extends serverSwitch
TextParser.setDenyMime(getConfig(SwitchboardConstants.PARSER_MIME_DENY, ""));
TextParser.setDenyExtension(getConfig(SwitchboardConstants.PARSER_EXTENSIONS_DENY, ""));
- // prepare a solr index profile switch list
- final File solrBackupProfile = new File("defaults/solr.keys.list");
- final String schemename =
- getConfig("federated.service.solr.indexing.schemefile", "solr.keys.default.list");
- final File solrWorkProfile = new File(getDataPath(), "DATA/SETTINGS/" + schemename);
- if ( !solrWorkProfile.exists() ) {
- FileUtils.copy(solrBackupProfile, solrWorkProfile);
- }
- final SolrConfiguration backupScheme = new SolrConfiguration(solrBackupProfile);
- this.solrScheme = new SolrConfiguration(solrWorkProfile);
- // update the working scheme with the backup scheme. This is necessary to include new features.
- // new features are always activated by default (if activated in input-backupScheme)
- this.solrScheme.fill(backupScheme, true);
- // set up the solr interface
- final String solrurls = getConfig("federated.service.solr.indexing.url", "http://127.0.0.1:8983/solr");
- final boolean usesolr = getConfigBool("federated.service.solr.indexing.enabled", false) & solrurls.length() > 0;
- try {
- this.indexSegments.segment(Segments.Process.LOCALCRAWLING).connectSolr(
- (usesolr) ? new SolrShardingConnector(
- solrurls,
- SolrShardingSelection.Method.MODULO_HOST_MD5,
- 10000, true) : null);
- } catch ( final IOException e ) {
- Log.logException(e);
- this.indexSegments.segment(Segments.Process.LOCALCRAWLING).connectSolr(null);
- }
// start a loader
this.log.logConfig("Starting Crawl Loader");
this.loader = new LoaderDispatcher(this);
@@ -1912,7 +1914,7 @@ public final class Switchboard extends serverSwitch
// flush the document compressor cache
Cache.commit();
Digest.cleanup(); // don't let caches become permanent memory leaks
// clear caches if necessary
if ( !MemoryControl.request(8000000L, false) ) {
for ( final Segment indexSegment : this.indexSegments ) {
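
The hunk is cut off here, but the visible lines show the periodic low-memory guard: request about 8 MB without forcing, and clear per-segment caches when the request fails. A runnable mini-version under stated assumptions (CacheOwner and clearCaches() are hypothetical names; the real loop body is truncated above):

import java.util.Arrays;
import java.util.List;

final class LowMemGuard {
    interface CacheOwner { void clearCaches(); }

    // stand-in for MemoryControl.request(8000000L, false)
    static boolean request(long bytes) {
        return Runtime.getRuntime().freeMemory() >= bytes;
    }

    static void cleanup(List<CacheOwner> segments) {
        if (!request(8000000L)) { // ~8 MB could not be guaranteed
            for (final CacheOwner s : segments) s.clearCaches();
        }
    }

    public static void main(String[] args) {
        cleanup(Arrays.<CacheOwner>asList(new CacheOwner() {
            public void clearCaches() { System.out.println("caches cleared"); }
        }));
    }
}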
