renamed the wordHashes method for word hash set generation to wordHashSet

This was done because the wordHashes iterator will get another integer
parameter and would then conflict with the wordHashes set generation

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@1921 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter 19 years ago
parent d5f8f40c31
commit 6c70f4a0cf
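For context, a brief sketch of the clash the rename avoids, assuming the iterator keeps its current three-argument form (see the commented-out call in plasmaDbImporter below) and later gains an extra int; the signatures here are illustrative only, not copied from this commit:

import java.util.Iterator;
import java.util.TreeSet;

// Illustrative sketch only; signatures are assumed from the call sites in this diff.
interface WordIndexSketch {
    // existing iterator over word hashes (three arguments today)
    Iterator wordHashes(String startHash, int resourceLevel, boolean rot);

    // Once the iterator gains an additional integer parameter it would read
    //   Iterator wordHashes(String startHash, int resourceLevel, boolean rot, int count);
    // i.e. the same parameter list as the set generator below. Java does not
    // allow two methods that differ only in return type, hence the rename.
    TreeSet wordHashSet(String startHash, int resourceLevel, boolean rot, int count);
}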

@@ -285,7 +285,7 @@ public class IndexControl_p {
 // generate list
 if (post.containsKey("keyhashsimilar")) {
 try {
-final Iterator hashIt = switchboard.wordIndex.wordHashes(keyhash, plasmaWordIndex.RL_WORDFILES, true, 256).iterator();
+final Iterator hashIt = switchboard.wordIndex.wordHashSet(keyhash, plasmaWordIndex.RL_WORDFILES, true, 256).iterator();
 StringBuffer result = new StringBuffer("Sequential List of Word-Hashes:<br>");
 String hash;
 int i = 0;

@@ -115,7 +115,7 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
 // iterate over all words from import db
 //Iterator importWordHashIterator = this.importWordIndex.wordHashes(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false);
-Iterator importWordHashIterator = this.importWordIndex.wordHashes(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
+Iterator importWordHashIterator = this.importWordIndex.wordHashSet(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
 while (!isAborted() && importWordHashIterator.hasNext()) {
 TreeSet entityUrls = new TreeSet(new kelondroNaturalOrder(true));
@@ -215,7 +215,7 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
 if (!importWordHashIterator.hasNext()) {
 // We may not be finished yet, try to get the next chunk of wordHashes
-TreeSet wordHashes = this.importWordIndex.wordHashes(this.wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
+TreeSet wordHashes = this.importWordIndex.wordHashSet(this.wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
 importWordHashIterator = wordHashes.iterator();
 // Make sure we don't get the same wordhash twice, but don't skip a word
 if ((importWordHashIterator.hasNext())&&(!this.wordHash.equals(importWordHashIterator.next()))) {

@@ -174,7 +174,7 @@ public class plasmaDHTChunk {
 ArrayList tmpContainers = new ArrayList(maxcount);
 String nexthash = "";
 try {
-Iterator wordHashIterator = wordIndex.wordHashes(hash, resourceLevel, true, maxcount).iterator();
+Iterator wordHashIterator = wordIndex.wordHashSet(hash, resourceLevel, true, maxcount).iterator();
 plasmaWordIndexEntryContainer indexContainer;
 Iterator urlIter;
 plasmaWordIndexEntry indexEntry;

@@ -378,7 +378,7 @@ public final class plasmaWordIndex {
 public static final int RL_ASSORTMENTS = 2;
 public static final int RL_WORDFILES = 3;
-public synchronized TreeSet wordHashes(String startHash, int resourceLevel, boolean rot, int count) throws IOException {
+public synchronized TreeSet wordHashSet(String startHash, int resourceLevel, boolean rot, int count) throws IOException {
 kelondroOrder hashOrder = (kelondroOrder) indexOrder.clone();
 hashOrder.rotate(startHash.getBytes());
 TreeSet hashes = new TreeSet(hashOrder);
@@ -540,7 +540,7 @@ public final class plasmaWordIndex {
 URL url = null;
 HashSet urlHashs = new HashSet();
 try {
-Iterator wordHashIterator = wordHashes(startHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
+Iterator wordHashIterator = wordHashSet(startHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
 while (wordHashIterator.hasNext() && run) {
 waiter();
 wordHash = (String) wordHashIterator.next();
@@ -572,7 +572,7 @@ public final class plasmaWordIndex {
 }
 if (!wordHashIterator.hasNext()) {
 // We may not be finished yet, try to get the next chunk of wordHashes
-TreeSet wordHashes = wordHashes(wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
+TreeSet wordHashes = wordHashSet(wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
 wordHashIterator = wordHashes.iterator();
 // Make sure we don't get the same wordhash twice, but don't skip a word
 if ((wordHashIterator.hasNext())&&(!wordHash.equals(wordHashIterator.next()))) {
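The two hunks above (and the matching ones in plasmaDbImporter) share a chunked-traversal idiom: pull up to 100 hashes into a TreeSet, iterate, and when the iterator runs dry fetch the next chunk starting at the last processed hash, taking care neither to process that hash twice nor to skip a new one. A standalone sketch of that idiom, with fetchChunk() as a hypothetical stand-in for wordHashSet(...); nothing here is verbatim YaCy code:

import java.util.Iterator;
import java.util.TreeSet;

// Sketch of the chunked word-hash walk used above.
public class ChunkedHashWalkSketch {

    // placeholder for wordIndex.wordHashSet(startHash, resourceLevel, rot, count)
    static TreeSet fetchChunk(String startHash, int count) {
        TreeSet chunk = new TreeSet();
        // ... fill with up to 'count' word hashes, ordered starting at startHash ...
        return chunk;
    }

    public static void main(String[] args) {
        String lastHash = "AAAAAAAAAAAA";                 // arbitrary start hash
        Iterator it = fetchChunk(lastHash, 100).iterator();
        while (it.hasNext()) {
            lastHash = (String) it.next();
            // ... process lastHash ...
            if (!it.hasNext()) {
                // chunk exhausted: we may not be finished, so fetch the next chunk
                TreeSet next = fetchChunk(lastHash, 100);
                it = next.iterator();
                // the next chunk usually starts with the hash just processed;
                // skip that duplicate, but do not skip a genuinely new hash
                if (!next.isEmpty() && next.first().equals(lastHash)) {
                    it.next();
                }
            }
        }
    }
}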
