refactoring: better naming for classic DB (files in WORDS)

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@2151 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 19 years ago
parent 5041d330ce
commit eaa6f012f0

@@ -82,12 +82,12 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
private final indexRAMCacheRI ramCache;
private final plasmaWordIndexAssortmentCluster assortmentCluster;
private int assortmentBufferSize; //kb
private final plasmaWordIndexClassicDB backend;
private final plasmaWordIndexFileCluster backend;
private final kelondroOrder indexOrder = new kelondroNaturalOrder(true);
public plasmaWordIndex(File databaseRoot, int bufferkb, serverLog log) {
this.databaseRoot = databaseRoot;
this.backend = new plasmaWordIndexClassicDB(databaseRoot, log);
this.backend = new plasmaWordIndexFileCluster(databaseRoot, log);
this.ramCache = new indexRAMCacheRI(databaseRoot, log);
// create new assortment cluster path
@@ -343,7 +343,7 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
public int indexSize(String wordHash) {
int size = 0;
try {
plasmaWordIndexEntity entity = backend.getEntity(wordHash, true, -1);
plasmaWordIndexFile entity = backend.getEntity(wordHash, true, -1);
if (entity != null) {
size += entity.size();
entity.close();
@@ -498,11 +498,11 @@ public final class plasmaWordIndex extends indexAbstractRI implements indexRI {
public Object migrateWords2Assortment(String wordhash) throws IOException {
// returns the number of entries that had been added to the assortments
// can be negative if some assortments have been moved to the backend
File db = plasmaWordIndexEntity.wordHash2path(databaseRoot, wordhash);
File db = plasmaWordIndexFile.wordHash2path(databaseRoot, wordhash);
if (!(db.exists())) return "not available";
plasmaWordIndexEntity entity = null;
plasmaWordIndexFile entity = null;
try {
entity = new plasmaWordIndexEntity(databaseRoot, wordhash, true);
entity = new plasmaWordIndexFile(databaseRoot, wordhash, true);
int size = entity.size();
if (size > assortmentCluster.clusterCapacity) {
// this will be too big to integrate it

@@ -55,14 +55,14 @@ import de.anomic.kelondro.kelondroTree;
import de.anomic.kelondro.kelondroException;
import de.anomic.server.logging.serverLog;
public final class plasmaWordIndexEntity {
public final class plasmaWordIndexFile {
private final String theWordHash;
private kelondroTree theIndex;
private File theLocation;
private boolean delete;
public plasmaWordIndexEntity(File databaseRoot, String wordHash, boolean deleteIfEmpty) {
public plasmaWordIndexFile(File databaseRoot, String wordHash, boolean deleteIfEmpty) {
theWordHash = wordHash;
theIndex = indexFile(databaseRoot, wordHash);
delete = deleteIfEmpty;
@@ -248,7 +248,7 @@ public final class plasmaWordIndexEntity {
}
public void merge(plasmaWordIndexEntity otherEntity, long time) throws IOException {
public void merge(plasmaWordIndexFile otherEntity, long time) throws IOException {
// this is a merge of another entity to this entity
// the merge is interrupted when the given time is over
// a time=-1 means: no timeout

@@ -58,14 +58,14 @@ import de.anomic.kelondro.kelondroNaturalOrder;
import de.anomic.server.logging.serverLog;
import de.anomic.yacy.yacySeedDB;
public class plasmaWordIndexClassicDB extends indexAbstractRI implements indexRI {
public class plasmaWordIndexFileCluster extends indexAbstractRI implements indexRI {
// class variables
private final File databaseRoot;
private final serverLog log;
private int size;
public plasmaWordIndexClassicDB(File databaseRoot, serverLog log) {
public plasmaWordIndexFileCluster(File databaseRoot, serverLog log) {
this.databaseRoot = databaseRoot;
this.log = log;
this.size = 0;
@@ -193,8 +193,8 @@ public class plasmaWordIndexClassicDB extends indexAbstractRI implements indexRI
public synchronized indexContainer getContainer(String wordHash, boolean deleteIfEmpty, long maxTime) {
long start = System.currentTimeMillis();
if ((maxTime < 0) || (maxTime > 60000)) maxTime=60000; // maximum is one minute
if (plasmaWordIndexEntity.wordHash2path(databaseRoot, wordHash).exists()) {
plasmaWordIndexEntity entity = this.getEntity(wordHash, deleteIfEmpty, (maxTime < 0) ? -1 : maxTime * 9 / 10);
if (plasmaWordIndexFile.wordHash2path(databaseRoot, wordHash).exists()) {
plasmaWordIndexFile entity = this.getEntity(wordHash, deleteIfEmpty, (maxTime < 0) ? -1 : maxTime * 9 / 10);
indexTreeMapContainer container = new indexTreeMapContainer(wordHash);
indexURLEntry entry;
Iterator i = entity.elements(true);
@@ -208,23 +208,23 @@ public class plasmaWordIndexClassicDB extends indexAbstractRI implements indexRI
}
}
public plasmaWordIndexEntity getEntity(String wordHash, boolean deleteIfEmpty, long maxTime) {
return new plasmaWordIndexEntity(databaseRoot, wordHash, deleteIfEmpty);
public plasmaWordIndexFile getEntity(String wordHash, boolean deleteIfEmpty, long maxTime) {
return new plasmaWordIndexFile(databaseRoot, wordHash, deleteIfEmpty);
}
public long getUpdateTime(String wordHash) {
File f = plasmaWordIndexEntity.wordHash2path(databaseRoot, wordHash);
File f = plasmaWordIndexFile.wordHash2path(databaseRoot, wordHash);
if (f.exists()) return f.lastModified(); else return -1;
}
public indexContainer deleteContainer(String wordHash) {
plasmaWordIndexEntity.removePlasmaIndex(databaseRoot, wordHash);
plasmaWordIndexFile.removePlasmaIndex(databaseRoot, wordHash);
return new indexTreeMapContainer(wordHash);
}
public int removeEntries(String wordHash, String[] urlHashes, boolean deleteComplete) {
// removes all given url hashes from a single word index. Returns number of deletions.
plasmaWordIndexEntity pi = null;
plasmaWordIndexFile pi = null;
int count = 0;
try {
pi = getEntity(wordHash, true, -1);
@@ -249,9 +249,9 @@ public class plasmaWordIndexClassicDB extends indexAbstractRI implements indexRI
if ((container == null) || (container.size() == 0)) return null;
// open file
plasmaWordIndexEntity pi = null;
plasmaWordIndexFile pi = null;
try {
pi = new plasmaWordIndexEntity(databaseRoot, container.wordHash(), false);
pi = new plasmaWordIndexFile(databaseRoot, container.wordHash(), false);
pi.addEntries(container);
// close and return

@@ -83,8 +83,8 @@ import de.anomic.plasma.plasmaURLPool;
import de.anomic.plasma.plasmaWordIndex;
import de.anomic.plasma.plasmaWordIndexAssortment;
import de.anomic.plasma.plasmaWordIndexAssortmentCluster;
import de.anomic.plasma.plasmaWordIndexClassicDB;
import de.anomic.plasma.plasmaWordIndexEntity;
import de.anomic.plasma.plasmaWordIndexFileCluster;
import de.anomic.plasma.plasmaWordIndexFile;
import de.anomic.index.indexURLEntry;
import de.anomic.server.serverCore;
import de.anomic.server.serverDate;
@@ -1140,7 +1140,7 @@ public final class yacy {
Iterator i = stopwords.iterator();
while (i.hasNext()) {
w = (String) i.next();
f = plasmaWordIndexEntity.wordHash2path(dbRoot, indexEntryAttribute.word2hash(w));
f = plasmaWordIndexFile.wordHash2path(dbRoot, indexEntryAttribute.word2hash(w));
if (f.exists()) {
thisamount = f.length();
if (f.delete()) {
@@ -1318,7 +1318,7 @@ public final class yacy {
plasmaWordIndexAssortment assortment = new plasmaWordIndexAssortment(new File(homeDBroot, "ACLUSTER"), a, 8*1024*1024, null);
WordHashIterator = assortment.hashes(wordChunkStartHash, true, false);
} else if (resource.equals("words")) {
plasmaWordIndexClassicDB fileDB = new plasmaWordIndexClassicDB(homeDBroot, log);
plasmaWordIndexFileCluster fileDB = new plasmaWordIndexFileCluster(homeDBroot, log);
WordHashIterator = fileDB.wordHashes(wordChunkStartHash, true, false);
}
int counter = 0;

Loading…
Cancel
Save