preparations to integrate the new 'cell' index data structure

(this commit is just to move development files to my other computer, no functionality change so far)

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5509 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 16 years ago
parent d399444e49
commit b74159feb8

@ -0,0 +1,214 @@
// indexRAMRI.java
// (C) 2005, 2006 by Michael Peter Christen; mc@yacy.net, Frankfurt a. M., Germany
// first published 2005 on http://yacy.net
//
// This is a part of YaCy, a peer-to-peer based web search engine
//
// $LastChangedDate: 2009-01-02 12:38:20 +0100 (Fr, 02 Jan 2009) $
// $LastChangedRevision: 5432 $
// $LastChangedBy: orbiter $
//
// LICENSE
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package de.anomic.index;
import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import de.anomic.kelondro.kelondroCloneableIterator;
import de.anomic.kelondro.kelondroMergeIterator;
import de.anomic.kelondro.kelondroOrder;
import de.anomic.kelondro.kelondroRow;
/*
 * An index cell is a part of the horizontal index in the new segment-oriented index
 * data structure of YaCy. If there is no filter in front of a cell, it might also be
 * the organization for a complete segment index. Each cell consists of a number of BLOB files, that
 * must be merged to represent a single index. In fact these index files are only merged on demand
 * if there are too many of them. An index merge can be done with a stream read and stream write operation.
 * In normal operation, there are only a number of read-only BLOB files and a single RAM cache that is
 * kept in the RAM as long as a given limit of entries is reached. Then the cache is flushed and becomes
 * another BLOB file in the index array.
 */
public final class indexCell implements indexRI {

    // class variables
    private final indexContainerBLOBArray array; // read-only, file-based part of the cell
    private indexContainerRAMHeap ram;           // mutable RAM part; replaced by cacheDump()
    private final int maxRamEntries;             // flush threshold for the RAM part

    /**
     * Open an index cell at the given location.
     * @param cellPath directory where the BLOB files of this cell are stored
     * @param payloadrow row definition of the index entries
     * @param maxRamEntries maximum number of entries in the RAM part before it is dumped to a BLOB
     * @throws IOException if the BLOB array cannot be opened
     */
    public indexCell(
            final File cellPath,
            final kelondroRow payloadrow,
            final int maxRamEntries
            ) throws IOException {
        this.array = new indexContainerBLOBArray(cellPath, payloadrow);
        this.ram = new indexContainerRAMHeap(payloadrow);
        this.maxRamEntries = maxRamEntries;
    }

    /**
     * Dump the RAM part to a new BLOB file, mount that file into the array and
     * start with a fresh, empty RAM part.
     * Must only be called while holding the monitor of this object, because it
     * replaces the this.ram reference.
     */
    private void cacheDump() throws IOException {
        // dump the ram
        File dumpFile = this.array.newContainerBLOBFile();
        this.ram.dump(dumpFile);
        // get a fresh ram cache
        this.ram = new indexContainerRAMHeap(this.array.rowdef());
        // add the dumped indexContainerBLOB to the array
        this.array.mountBLOBContainer(dumpFile);
    }

    /**
     * add entries to the cell: this adds the new entries always to the RAM part, never to BLOBs
     * @throws IOException
     */
    public synchronized void addEntries(indexContainer newEntries) throws IOException {
        this.ram.add(newEntries);
        if (this.ram.size() > this.maxRamEntries) cacheDump();
    }

    /**
     * clear the RAM and BLOB part, deletes everything in the cell
     */
    public synchronized void clear() throws IOException {
        this.ram.clear();
        this.array.clear();
    }

    /**
     * when a cell is closed, the current RAM is dumped to a file which will be opened as
     * BLOB file the next time a cell is opened. A name for the dump is automatically generated
     * and is composed of the current date and the cell salt
     */
    public synchronized void close() {
        // dump the ram
        try {
            this.ram.dump(this.array.newContainerBLOBFile());
        } catch (IOException e) {
            e.printStackTrace();
        }
        // close all
        this.ram.close();
        this.array.close();
    }

    /**
     * deleting a container affects the containers in RAM and all the BLOB files;
     * the deleted containers are merged and returned as result of the method.
     * Synchronized because cacheDump() may replace the RAM reference concurrently.
     * @return the merged container of all deleted entries, or null if the word is unknown
     */
    public synchronized indexContainer deleteContainer(String wordHash) throws IOException {
        indexContainer c0 = this.ram.delete(wordHash);
        indexContainer c1 = this.array.get(wordHash);
        if (c1 == null) return c0; // c0 may be null as well: word completely unknown
        this.array.delete(wordHash);
        return (c0 == null) ? c1 : c1.merge(c0);
    }

    /**
     * all containers in the BLOBs and the RAM are merged and returned.
     * NOTE(review): the urlselection parameter is not evaluated here, although the
     * indexRI contract says non-selected references shall be removed -- TODO confirm
     */
    public synchronized indexContainer getContainer(String wordHash, Set<String> urlselection) throws IOException {
        indexContainer c0 = this.ram.get(wordHash);
        indexContainer c1 = this.array.get(wordHash);
        if (c1 == null) return c0; // c0 may be null as well: word completely unknown
        return (c0 == null) ? c1 : c1.merge(c0);
    }

    /**
     * checks if there is any container for this wordHash, either in RAM or any BLOB
     */
    public synchronized boolean hasContainer(String wordHash) {
        return this.ram.has(wordHash) || this.array.has(wordHash);
    }

    /**
     * minimum memory that should be available to operate this cell
     */
    public int minMem() {
        return 10 * 1024 * 1024; // 10 MB
    }

    /**
     * remove url references from a selected word hash. this deletes also in the BLOB
     * files, which means that there exists new gap entries after the deletion
     * The gaps are never merged in place, but can be eliminated when BLOBs are merged into
     * new BLOBs. This returns the sum of all url references that have been removed.
     * NOTE(review): only the BLOB part is rewritten here; entries in the RAM part are
     * not removed -- TODO confirm whether the RAM part must be cleaned as well
     * @throws IOException
     */
    public synchronized int removeEntries(String wordHash, Set<String> urlHashes) throws IOException {
        int reduced = this.array.replace(wordHash, new RemoveRewriter(urlHashes));
        return reduced / this.array.rowdef().objectsize;
    }

    public synchronized boolean removeEntry(String wordHash, String urlHash) throws IOException {
        int reduced = this.array.replace(wordHash, new RemoveRewriter(urlHash));
        return reduced > 0;
    }

    /**
     * @return the sum of the entry counts of the RAM and the BLOB part
     */
    public synchronized int size() {
        return this.ram.size() + this.array.size();
    }

    public kelondroCloneableIterator<indexContainer> wordContainers(String startWordHash, boolean rot) throws IOException {
        return wordContainers(startWordHash, rot, false);
    }

    /**
     * iterate over the word containers, merged from the RAM and the BLOB part.
     * NOTE(review): the rot parameter is accepted but not evaluated here -- TODO confirm
     * @param ramOnly if true, only the RAM part is iterated
     */
    @SuppressWarnings("unchecked")
    public synchronized kelondroCloneableIterator<indexContainer> wordContainers(final String startWordHash, boolean rot, final boolean ramOnly) throws IOException {
        if (ramOnly) {
            // no merge order needed when only the RAM part is iterated
            return this.ram.wordContainers(startWordHash, false);
        }
        final kelondroOrder<indexContainer> containerOrder = new indexContainerOrder(this.ram.rowdef().getOrdering().clone());
        containerOrder.rotate(new indexContainer(startWordHash, this.ram.rowdef(), 0));
        return new kelondroMergeIterator<indexContainer>(
                this.ram.wordContainers(startWordHash, false),
                this.array.wordContainers(startWordHash, false),
                containerOrder,
                indexContainer.containerMergeMethod,
                true);
    }

    /**
     * ContainerRewriter that removes a given set of url hashes from each container.
     * Declared static because it uses no state of the enclosing cell.
     */
    public static class RemoveRewriter implements indexContainerBLOBArray.ContainerRewriter {

        private final Set<String> urlHashes; // references to be removed from each container

        public RemoveRewriter(Set<String> urlHashes) {
            this.urlHashes = urlHashes;
        }

        public RemoveRewriter(String urlHash) {
            this.urlHashes = new HashSet<String>();
            this.urlHashes.add(urlHash);
        }

        public indexContainer rewrite(indexContainer container) {
            container.removeEntries(urlHashes);
            return container;
        }
    }
}

@ -32,12 +32,13 @@ import java.util.Date;
import java.util.Iterator;
import java.util.List;
import de.anomic.kelondro.kelondroBLOB;
import de.anomic.kelondro.kelondroBLOBArray;
import de.anomic.kelondro.kelondroCloneableIterator;
import de.anomic.kelondro.kelondroRow;
import de.anomic.kelondro.kelondroRowSet;
public final class indexContainerBLOBHeap {
public final class indexContainerBLOBArray {
private final kelondroRow payloadrow;
private final kelondroBLOBArray array;
@ -52,28 +53,27 @@ public final class indexContainerBLOBHeap {
* @param log
* @throws IOException
*/
public indexContainerBLOBHeap(
public indexContainerBLOBArray(
final File heapLocation,
final String blobSalt,
final kelondroRow payloadrow) throws IOException {
this.payloadrow = payloadrow;
this.array = new kelondroBLOBArray(
heapLocation,
blobSalt,
"index",
payloadrow.primaryKeyLength,
payloadrow.getOrdering(),
0);
}
public void close() {
public synchronized void close() {
this.array.close();
}
public void clear() throws IOException {
public synchronized void clear() throws IOException {
this.array.clear();
}
public int size() {
public synchronized int size() {
return (this.array == null) ? 0 : this.array.size();
}
@ -81,6 +81,14 @@ public final class indexContainerBLOBHeap {
return this.array.newBLOB(new Date());
}
/**
 * Mount a dumped container BLOB file into this array.
 * The file is presumably one produced via newContainerBLOBFile(), since the
 * underlying mountBLOB parses a date stamp out of the file name -- TODO confirm.
 * @param location the BLOB file to mount
 * @throws IOException if the file cannot be mounted
 */
public void mountBLOBContainer(File location) throws IOException {
    this.array.mountBLOB(location);
}
/**
 * @return the row definition of the payload entries stored in this array
 */
public kelondroRow rowdef() {
    return this.payloadrow;
}
/**
* return an iterator object that creates top-level-clones of the indexContainers
* in the cache, so that manipulations of the iterated objects do not change
@ -159,7 +167,7 @@ public final class indexContainerBLOBHeap {
* @return true, if the key is used in the heap; false othervise
* @throws IOException
*/
public boolean has(final String key) {
public synchronized boolean has(final String key) {
return this.array.has(key.getBytes());
}
@ -169,7 +177,7 @@ public final class indexContainerBLOBHeap {
* @return the indexContainer if one exist, null otherwise
* @throws IOException
*/
public indexContainer get(final String key) throws IOException {
public synchronized indexContainer get(final String key) throws IOException {
List<byte[]> entries = this.array.getAll(key.getBytes());
if (entries == null || entries.size() == 0) return null;
byte[] a = entries.remove(0);
@ -190,4 +198,63 @@ public final class indexContainerBLOBHeap {
// returns the index that had been deleted
array.remove(wordHash.getBytes());
}
/**
 * Rewrite the stored container for the given word hash in all mounted BLOBs.
 * @param wordHash the primary key of the container
 * @param rewriter the container-level transformation to apply
 * @return the number of bytes by which the stored entries shrank (see kelondroBLOB.replace)
 * @throws IOException
 */
public synchronized int replace(final String wordHash, ContainerRewriter rewriter) throws IOException {
    return array.replace(wordHash.getBytes(), new BLOBRewriter(wordHash, rewriter));
}
/**
 * Adapter that applies a ContainerRewriter to the raw byte form of a container,
 * as required by the kelondroBLOB.Rewriter interface. Non-static because it
 * needs the payloadrow of the enclosing array to decode the bytes.
 */
public class BLOBRewriter implements kelondroBLOB.Rewriter {

    private final ContainerRewriter rewriter; // the container-level rewrite logic
    private final String wordHash;            // key of the container being rewritten

    public BLOBRewriter(String wordHash, ContainerRewriter rewriter) {
        this.rewriter = rewriter;
        this.wordHash = wordHash;
    }

    /**
     * Decode the BLOB bytes into an indexContainer, apply the rewriter and
     * encode the result back.
     * @param b the stored container bytes; may be null
     * @return the re-encoded container, or null if the input or the rewrite result is null
     */
    public byte[] rewrite(byte[] b) {
        if (b == null) return null;
        indexContainer c = rewriter.rewrite(new indexContainer(this.wordHash, kelondroRowSet.importRowSet(b, payloadrow)));
        if (c == null) return null;
        return c.exportCollection();
    }
}
/*
public int mergeOldest() {
if (this.array.entries() < 2) return 0;
File f1 = this.array.unmountOldestBLOB();
File f2 = this.array.unmountOldestBLOB();
// iterate both files and write a new one
new kelondroMergeIterator<indexContainer>(
(kelondroCloneableIterator<Map.Entry<String, byte[]>>) new kelondroBLOBHeapReader.entries(f1, this.payloadrow.objectsize),
null,
null,
null,
true);
return 0;
}
*/
/*
* new kelondroMergeIterator<indexContainer>(
new kelondroBLOBHeapReader.entries(f1, this.payloadrow.objectsize),
new kelondroBLOBHeapReader.entries(f2, this.payloadrow.objectsize),
this.payloadrow.getOrdering(),
indexContainer.containerMergeMethod,
true);
*/
/*
public kelondroMergeIterator(
final kelondroCloneableIterator<E> a,
final kelondroCloneableIterator<E> b,
final Comparator<E> c,
final Method m, final boolean up) {
*/
/**
 * Callback for replace(): transforms an indexContainer into its replacement.
 * Because the result is written back through kelondroBLOB.Rewriter, the
 * rewritten container presumably must not be larger than the input -- see
 * the size contract documented on kelondroBLOB.Rewriter.
 */
public interface ContainerRewriter {
    public indexContainer rewrite(indexContainer container);
}
}

@ -65,6 +65,10 @@ public final class indexContainerRAMHeap {
this.cache = null;
}
/**
 * @return the row definition of the payload entries kept in this heap
 */
public kelondroRow rowdef() {
    return this.payloadrow;
}
public void clear() {
if (cache != null) cache.clear();
initWriteMode();
@ -241,13 +245,9 @@ public final class indexContainerRAMHeap {
* because they may get very large, it is wise to deallocate some memory before calling next()
*/
public indexContainer next() {
try {
Map.Entry<String, byte[]> entry = blobs.next();
byte[] payload = entry.getValue();
return new indexContainer(entry.getKey(), kelondroRowSet.importRowSet(payload, payloadrow));
} catch (final IOException e) {
return null;
}
Map.Entry<String, byte[]> entry = blobs.next();
byte[] payload = entry.getValue();
return new indexContainer(entry.getKey(), kelondroRowSet.importRowSet(payload, payloadrow));
}
public void remove() {

@ -29,6 +29,7 @@ package de.anomic.index;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;
import de.anomic.kelondro.kelondroCloneableIterator;
@ -37,7 +38,7 @@ import de.anomic.kelondro.kelondroRow;
import de.anomic.server.serverMemory;
import de.anomic.server.logging.serverLog;
public final class indexRAMRI implements indexRI, indexRIReader {
public final class indexRAMRI implements indexRI, indexRIReader, Iterable<indexContainer> {
// class variables
private final kelondroMScoreCluster<String> hashScore;
@ -334,4 +335,9 @@ public final class indexRAMRI implements indexRI, indexRIReader {
hashScore.clear();
hashDate.clear();
}
/**
 * Iterable implementation -- not written yet.
 * NOTE(review): the auto-generated stub returned null, which violates the
 * Iterable contract and would cause a NullPointerException in any for-each
 * loop over this object. Fail fast with a clear message instead until the
 * cache iteration is implemented.
 */
public Iterator<indexContainer> iterator() {
    throw new UnsupportedOperationException("indexRAMRI.iterator() is not implemented yet");
}
}

@ -37,13 +37,13 @@ public interface indexRI {
public int size();
public int minMem();
public kelondroCloneableIterator<indexContainer> wordContainers(String startWordHash, boolean rot); // method to replace wordHashes
public kelondroCloneableIterator<indexContainer> wordContainers(String startWordHash, boolean rot) throws IOException; // method to replace wordHashes
public boolean hasContainer(String wordHash); // should only be used if in case that true is returned the getContainer is NOT called
public indexContainer getContainer(String wordHash, Set<String> urlselection) throws IOException; // if urlselection != null all url references which are not in urlselection are removed from the container
public indexContainer deleteContainer(String wordHash) throws IOException;
public boolean removeEntry(String wordHash, String urlHash);
public int removeEntries(String wordHash, Set<String> urlHashes);
public void addEntries(indexContainer newEntries);
public boolean removeEntry(String wordHash, String urlHash) throws IOException;
public int removeEntries(String wordHash, Set<String> urlHashes) throws IOException;
public void addEntries(indexContainer newEntries) throws IOException;
public void clear() throws IOException;
public void close();

@ -118,6 +118,21 @@ public interface kelondroBLOB {
*/
public void put(byte[] key, byte[] b) throws IOException;
/**
 * replace an existing entry in the BLOB with a new entry.
 * This method is similar to put, but it is necessary that a blob entry existed before
 * and contains an entry of same size or bigger than the new entry.
 * The old entry is then replaced by the new entry.
 * This method throws an IOException if the new element is bigger than the old element.
 * It is therefore necessary that it is known that the new entry will be smaller than the
 * old entry before calling this method.
 * @param key the primary key
 * @param rewriter produces the replacement entry, which must be of equal or smaller size
 * @return the number of bytes that the rewriter reduced the BLOB
 * @throws IOException
 */
public int replace(byte[] key, Rewriter rewriter) throws IOException;
/**
* remove a BLOB
* @param key the primary key
@ -130,4 +145,17 @@ public interface kelondroBLOB {
*/
public void close();
public interface Rewriter {

    /**
     * a rewrite method that is used in the replace functionality of a BLOB;
     * the result of such a rewrite must be always smaller or equal in size
     * than the input
     * @param b the original entry content
     * @return an array that is equal or smaller in size than b
     */
    public byte[] rewrite(byte[] b);
}
}

@ -126,6 +126,61 @@ public class kelondroBLOBArray implements kelondroBLOB {
}
}
/**
 * add a blob file to the array.
 * note that this file must be generated with a file name from newBLOB(),
 * because the creation date is parsed back out of the first 14 characters
 * of the file name.
 * @param location the BLOB file to mount
 * @throws IOException if the file name does not carry a parseable date stamp
 */
public void mountBLOB(File location) throws IOException {
    Date d;
    try {
        d = serverDate.parseShortSecond(location.getName().substring(0, 14));
    } catch (ParseException e) {
        // preserve the original cause for easier debugging
        // (IOException has no (String, Throwable) constructor before Java 6)
        IOException ioe = new IOException("date parse problem with file " + location.toString() + ": " + e.getMessage());
        ioe.initCause(e);
        throw ioe;
    }
    kelondroBLOB oneBlob = (buffersize > 0) ? new kelondroBLOBHeap(location, keylength, ordering, buffersize) : new kelondroBLOBHeapModifier(location, keylength, ordering);
    blobs.add(new blobItem(d, location, oneBlob));
}
/**
 * Unmount a mounted BLOB file: take it out of the array and close it.
 * Does nothing if no mounted BLOB matches the given location.
 * @param location the file of the BLOB to unmount
 */
public void unmountBLOB(File location) {
    final Iterator<blobItem> it = this.blobs.iterator();
    while (it.hasNext()) {
        final blobItem item = it.next();
        if (!item.location.equals(location)) continue;
        it.remove();
        item.blob.close();
        return;
    }
}
/**
 * Unmount the first (oldest) BLOB in the array and close it.
 * @return the file of the unmounted BLOB, or null if the array is empty
 */
public File unmountOldestBLOB() {
    if (this.blobs.isEmpty()) return null;
    final blobItem oldest = this.blobs.remove(0);
    oldest.blob.close();
    return oldest.location;
}
/**
 * return the number of BLOB files in this array
 * @return the count of currently mounted BLOB files
 */
public int entries() {
    return this.blobs.size();
}
/**
 * generate a new BLOB file name with a given date.
 * This method is needed to generate a file name that matches to the name structure that is needed for parts of the array
 * @param creation the creation date to encode into the file name
 * @return a file inside the heap location whose name is a short-second date stamp
 *         (see serverDate.formatShortSecond) followed by the blob salt and ".blob"
 */
public File newBLOB(Date creation) {
    return new File(heapLocation, serverDate.formatShortSecond(creation) + "." + blobSalt + ".blob");
}
/**
 * @return the directory name of the heap location of this array
 */
public String name() {
    return this.heapLocation.getName();
}
@ -190,11 +245,7 @@ public class kelondroBLOBArray implements kelondroBLOB {
this.blob = (buffer == 0) ? new kelondroBLOBHeapModifier(location, keylength, ordering) : new kelondroBLOBHeap(location, keylength, ordering, buffer);
}
}
public File newBLOB(Date creation) {
return new File(heapLocation, serverDate.formatShortSecond(creation) + "." + blobSalt + ".blob");
}
/**
* ask for the length of the primary key
* @return the length of the key
@ -337,6 +388,19 @@ public class kelondroBLOBArray implements kelondroBLOB {
executeLimits();
}
/**
 * replace a BLOB entry with another which must be smaller or same size;
 * the replacement is applied to every mounted BLOB that contains the key
 * @param key the primary key
 * @param rewriter produces the replacement entry
 * @return the total number of bytes saved, summed over all mounted BLOBs
 * @throws IOException
 */
public int replace(byte[] key, Rewriter rewriter) throws IOException {
    int d = 0;
    for (blobItem bi: blobs) {
        d += bi.blob.replace(key, rewriter);
    }
    return d;
}
/**
* remove a BLOB
* @param key the primary key

@ -268,4 +268,15 @@ public class kelondroBLOBCompressor extends Thread implements kelondroBLOB {
assert this.bufferlength == 0;
}
/**
 * Replace the entry for the given key by the rewriter's result.
 * @param key the primary key
 * @param rewriter produces the replacement entry; per the Rewriter contract it
 *        must not produce a larger entry
 * @return the number of bytes the entry shrank; 0 if the key is unknown or the size is unchanged
 * @throws IOException
 */
public int replace(byte[] key, Rewriter rewriter) throws IOException {
    byte[] b = get(key);
    if (b == null) return 0;
    byte[] c = rewriter.rewrite(b);
    // BUGFIX: the reduction is the OLD size minus the NEW size.
    // The previous code computed c.length - b.length, which is negative for every
    // shrinking rewrite and therefore always tripped the assert below
    // (compare the correct form in kelondroBLOBTree.replace).
    int reduction = b.length - c.length;
    assert reduction >= 0;
    // NOTE(review): a same-size rewrite with different content is not persisted
    // here -- TODO confirm entries can only change by shrinking
    if (reduction == 0) return 0;
    this.put(key, c);
    return reduction;
}
}

@ -30,6 +30,7 @@ import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
import de.anomic.server.serverMemory;
import de.anomic.server.logging.serverLog;
public class kelondroBLOBHeapModifier extends kelondroBLOBHeapReader implements kelondroBLOB {
@ -274,4 +275,56 @@ public class kelondroBLOBHeapModifier extends kelondroBLOBHeapReader implements
throw new UnsupportedOperationException("put is not supported in BLOBHeapModifier");
}
/**
 * Replace the entry for the given key in place by the result of the rewriter.
 * The rewritten entry must be smaller than the old one; the freed tail of the
 * old record is turned into a free-space record.
 * @param key the primary key
 * @param rewriter produces the (smaller) replacement content
 * @return the number of bytes by which the entry shrank, or 0 if nothing was done
 * @throws IOException if the rewritten entry does not fit into the old record
 */
public int replace(byte[] key, Rewriter rewriter) throws IOException {
    assert index.row().primaryKeyLength == key.length : index.row().primaryKeyLength + "!=" + key.length;

    // check if the index contains the key
    final long pos = index.getl(key);
    if (pos < 0) return 0;

    // access the file and read the container
    file.seek(pos);
    // stored length field covers key + payload; subtract the key length
    final int len = file.readInt() - index.row().primaryKeyLength;
    if (serverMemory.available() < len) {
        if (!serverMemory.request(len, true)) return 0; // not enough memory available for this blob
    }

    // read the key
    final byte[] keyf = new byte[index.row().primaryKeyLength];
    file.readFully(keyf, 0, keyf.length);
    assert this.ordering.compare(key, keyf) == 0;

    // read the blob
    byte[] blob = new byte[len];
    file.readFully(blob, 0, blob.length);

    // rewrite the entry
    // NOTE(review): a rewriter returning null would cause a NullPointerException
    // below -- TODO confirm rewriters used here never return null
    blob = rewriter.rewrite(blob);
    int reduction = len - blob.length;
    if (reduction == 0) return 0; // nothing to do

    // check if the new entry is smaller than the old entry
    // (4 bytes are reserved for the length field of the free record created below)
    if (blob.length > len - 4) throw new IOException("replace of BLOB for key " + new String(key) + " failed (too large): new size = " + blob.length + ", old size = " + (len - 4));

    // replace old content
    this.file.seek(pos);
    file.writeInt(blob.length + key.length);
    file.write(key);
    file.write(blob);

    // define the new empty entry
    // NOTE(review): when reduction == 4 the new free record length is 0 and the
    // assert below fires -- TODO confirm whether a zero-length free record is legal
    final int newfreereclen = reduction - 4;
    assert newfreereclen > 0;
    file.writeInt(newfreereclen);

    // fill zeros to the content
    int l = newfreereclen; byte[] fill = new byte[newfreereclen];
    while (l-- > 0) fill[l] = 0;
    this.file.write(fill, 0, newfreereclen);

    // add a new free entry
    this.free.put(pos + 4 + blob.length + key.length, newfreereclen);

    return reduction;
}
}

@ -31,6 +31,7 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ExecutionException;
import de.anomic.server.serverMemory;
@ -232,7 +233,7 @@ public class kelondroBLOBHeapReader {
file.seek(pos);
final int len = file.readInt() - index.row().primaryKeyLength;
if (serverMemory.available() < len) {
if (!serverMemory.request(len, false)) return null; // not enough memory available for this blob
if (!serverMemory.request(len, true)) return null; // not enough memory available for this blob
}
// read the key
@ -327,17 +328,32 @@ public class kelondroBLOBHeapReader {
* static iterator of entries in BLOBHeap files:
* this is used to import heap dumps into a write-enabled index heap
*/
public static class entries implements Iterator<Map.Entry<String, byte[]>>, Iterable<Map.Entry<String, byte[]>> {
public static class entries implements
kelondroCloneableIterator<Map.Entry<String, byte[]>>,
Iterator<Map.Entry<String, byte[]>>,
Iterable<Map.Entry<String, byte[]>> {
DataInputStream is;
int keylen;
private File blobFile;
Map.Entry<String, byte[]> nextEntry;
/**
 * Open a static entry iterator over a BLOBHeap dump file.
 * @param blobFile the heap dump file to read
 * @param keylen length of the primary key in bytes
 * @throws IOException if the file does not exist
 */
public entries(final File blobFile, final int keylen) throws IOException {
    if (!(blobFile.exists())) throw new IOException("file " + blobFile + " does not exist");
    this.is = new DataInputStream(new BufferedInputStream(new FileInputStream(blobFile), 1024*1024));
    this.keylen = keylen;
    this.blobFile = blobFile;
    this.nextEntry = next0();
}
/**
 * Create a fresh iterator over the same dump file, starting at the beginning.
 * @param modifier ignored by this implementation
 * @return a new entries iterator, or null if the file can no longer be opened
 */
public kelondroCloneableIterator<Entry<String, byte[]>> clone(Object modifier) {
    try {
        return new entries(blobFile, keylen);
    } catch (IOException e) {
        // NOTE(review): the failure is swallowed and reported as null --
        // callers must be prepared for a null clone
        e.printStackTrace();
        return null;
    }
}
public boolean hasNext() {
return this.nextEntry != null;

@ -510,4 +510,17 @@ public class kelondroBLOBTree implements kelondroBLOB {
public long length() throws IOException {
return this.file.length();
}
/**
 * Replace the entry for the given key by the rewriter's result.
 * @param key the primary key
 * @param rewriter produces the replacement entry
 * @return the number of bytes by which the entry shrank
 * @throws IOException
 */
public int replace(byte[] key, Rewriter rewriter) throws IOException {
    byte[] b = get(key);
    if (b == null) {
        // NOTE(review): removing a key whose get() returned null looks like a
        // no-op; presumably this cleans up an empty record -- TODO confirm
        remove(key);
        return 0;
    }
    byte[] c = rewriter.rewrite(b);
    int reduced = b.length - c.length;
    assert reduced >= 0;
    put(key, c);
    return reduced;
}
}

@ -37,7 +37,12 @@ public class kelondroMergeIterator<E> implements kelondroCloneableIterator<E> {
private final Method merger;
private final boolean up;
public kelondroMergeIterator(final kelondroCloneableIterator<E> a, final kelondroCloneableIterator<E> b, final Comparator<E> c, final Method m, final boolean up) {
public kelondroMergeIterator(
final kelondroCloneableIterator<E> a,
final kelondroCloneableIterator<E> b,
final Comparator<E> c,
final Method m,
final boolean up) {
// this works currently only for String-type key iterations
this.a = a;
this.b = b;
@ -47,8 +52,8 @@ public class kelondroMergeIterator<E> implements kelondroCloneableIterator<E> {
nexta();
nextb();
}
public kelondroMergeIterator<E> clone(final Object modifier) {
/**
 * Create a clone of this merge iterator by cloning both source iterators.
 * @param modifier passed through to the clone methods of both sources
 * @return a new merge iterator over clones of both sources
 */
public kelondroMergeIterator<E> clone(final Object modifier) {
    return new kelondroMergeIterator<E>(a.clone(modifier), b.clone(modifier), comp, merger, up);
}

@ -85,7 +85,7 @@ public class kelondroRowSet extends kelondroRowCollection implements kelondroInd
return new kelondroRowSet(rowdef, size, chunkcache, orderbound);
}
public static kelondroRowSet importRowSet(byte[] b, final kelondroRow rowdef) throws IOException {
public static kelondroRowSet importRowSet(byte[] b, final kelondroRow rowdef) {
final int size = (int) kelondroNaturalOrder.decodeLong(b, 0, 4);
final int orderbound = (int) kelondroNaturalOrder.decodeLong(b, 10, 4);
final byte[] chunkcache = new byte[size * rowdef.objectsize];

@ -141,7 +141,19 @@ public final class plasmaWordIndex implements indexRI {
}
}
}
/*
*
final File textindexcache = new File(indexPrimaryTextLocation, "RICACHE");
if (!(textindexcache.exists())) textindexcache.mkdirs();
if (new File(textindexcache, "index.dhtout.blob").exists()) {
this.dhtCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, entityCacheMaxSize, wCacheMaxChunk, wCacheMaxAge, "index.dhtout.heap", "index.dhtout.blob", log);
indexRAMRI dhtInCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, entityCacheMaxSize, wCacheMaxChunk, wCacheMaxAge, "index.dhtin.heap", "index.dhtin.blob", log);
indexContainer c1;
for (indexContainer c: dhtInCache) {
this.dhtCache.addEntries(c);
}
}
*/
final File textindexcache = new File(indexPrimaryTextLocation, "RICACHE");
if (!(textindexcache.exists())) textindexcache.mkdirs();
this.dhtOutCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, entityCacheMaxSize, wCacheMaxChunk, wCacheMaxAge, "index.dhtout.heap", "index.dhtout.blob", log);

Loading…
Cancel
Save