renamed the wordHashes method for word hash set generation to wordHashSet

This was done because the wordHashes iterator will get another integer
parameter and would then conflict with the set-generating wordHashes
method, now called wordHashSet.
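For context, a minimal sketch (not the YaCy source) of why both methods could not keep the name wordHashes. The planned iterator signature is an assumption; only the set-generating signature appears in this diff. Once the iterator variant takes the extra int parameter, the two would share a parameter list and differ only in return type, which Java does not allow for overloads.

import java.util.Iterator;
import java.util.TreeSet;

// Sketch only -- placeholder bodies, hypothetical iterator variant.
class WordHashNamingSketch {

    // set generation, carrying the name wordHashSet after this commit
    public TreeSet wordHashSet(String startHash, int resourceLevel, boolean rot, int count) {
        return new TreeSet(); // placeholder body
    }

    // planned iterator variant keeping the old name; with the added int count it
    // would have had the same parameter list as the method above, and Java cannot
    // distinguish overloads by return type alone -- hence the rename
    public Iterator wordHashes(String startHash, int resourceLevel, boolean rot, int count) {
        return new TreeSet().iterator(); // placeholder body
    }
}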

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@1921 6c8d7289-2bf4-0310-a012-ef5d649a1542
Branch: pull/1/head
Author: orbiter (19 years ago)
Parent: d5f8f40c31
Commit: 6c70f4a0cf

@@ -285,7 +285,7 @@ public class IndexControl_p {
 // generate list
 if (post.containsKey("keyhashsimilar")) {
 try {
-final Iterator hashIt = switchboard.wordIndex.wordHashes(keyhash, plasmaWordIndex.RL_WORDFILES, true, 256).iterator();
+final Iterator hashIt = switchboard.wordIndex.wordHashSet(keyhash, plasmaWordIndex.RL_WORDFILES, true, 256).iterator();
 StringBuffer result = new StringBuffer("Sequential List of Word-Hashes:<br>");
 String hash;
 int i = 0;

@@ -115,7 +115,7 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
 // iterate over all words from import db
 //Iterator importWordHashIterator = this.importWordIndex.wordHashes(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false);
-Iterator importWordHashIterator = this.importWordIndex.wordHashes(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
+Iterator importWordHashIterator = this.importWordIndex.wordHashSet(this.wordChunkStartHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
 while (!isAborted() && importWordHashIterator.hasNext()) {
 TreeSet entityUrls = new TreeSet(new kelondroNaturalOrder(true));
@@ -215,7 +215,7 @@ public class plasmaDbImporter extends AbstractImporter implements dbImporter {
 if (!importWordHashIterator.hasNext()) {
 // We may not be finished yet, try to get the next chunk of wordHashes
-TreeSet wordHashes = this.importWordIndex.wordHashes(this.wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
+TreeSet wordHashes = this.importWordIndex.wordHashSet(this.wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
 importWordHashIterator = wordHashes.iterator();
 // Make sure we don't get the same wordhash twice, but don't skip a word
 if ((importWordHashIterator.hasNext())&&(!this.wordHash.equals(importWordHashIterator.next()))) {

@@ -174,7 +174,7 @@ public class plasmaDHTChunk {
 ArrayList tmpContainers = new ArrayList(maxcount);
 String nexthash = "";
 try {
-Iterator wordHashIterator = wordIndex.wordHashes(hash, resourceLevel, true, maxcount).iterator();
+Iterator wordHashIterator = wordIndex.wordHashSet(hash, resourceLevel, true, maxcount).iterator();
 plasmaWordIndexEntryContainer indexContainer;
 Iterator urlIter;
 plasmaWordIndexEntry indexEntry;

@@ -378,7 +378,7 @@ public final class plasmaWordIndex {
 public static final int RL_ASSORTMENTS = 2;
 public static final int RL_WORDFILES = 3;
-public synchronized TreeSet wordHashes(String startHash, int resourceLevel, boolean rot, int count) throws IOException {
+public synchronized TreeSet wordHashSet(String startHash, int resourceLevel, boolean rot, int count) throws IOException {
 kelondroOrder hashOrder = (kelondroOrder) indexOrder.clone();
 hashOrder.rotate(startHash.getBytes());
 TreeSet hashes = new TreeSet(hashOrder);
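The visible context suggests how wordHashSet assembles its result: it clones the index order, rotates it at startHash, and collects hashes into a TreeSet built on that comparator, so iteration begins at startHash and wraps around the key space. A minimal sketch of that rotated-order idea, using a plain Comparator instead of kelondroOrder and invented hash values:

import java.util.Comparator;
import java.util.TreeSet;

// Sketch only -- not the kelondroOrder implementation.
public class RotatedHashOrderSketch {
    public static void main(String[] args) {
        final String startHash = "MMMM"; // hypothetical pivot hash
        Comparator<String> rotated = (a, b) -> {
            int ra = a.compareTo(startHash) >= 0 ? 0 : 1; // keys at/after the pivot come first
            int rb = b.compareTo(startHash) >= 0 ? 0 : 1;
            return ra != rb ? ra - rb : a.compareTo(b);
        };
        TreeSet<String> hashes = new TreeSet<>(rotated);
        hashes.add("AAAA");
        hashes.add("MMMM");
        hashes.add("ZZZZ");
        System.out.println(hashes); // [MMMM, ZZZZ, AAAA] -- starts at the pivot, then wraps
    }
}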
@@ -540,7 +540,7 @@ public final class plasmaWordIndex {
 URL url = null;
 HashSet urlHashs = new HashSet();
 try {
-Iterator wordHashIterator = wordHashes(startHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
+Iterator wordHashIterator = wordHashSet(startHash, plasmaWordIndex.RL_WORDFILES, false, 100).iterator();
 while (wordHashIterator.hasNext() && run) {
 waiter();
 wordHash = (String) wordHashIterator.next();
@@ -572,7 +572,7 @@ public final class plasmaWordIndex {
 }
 if (!wordHashIterator.hasNext()) {
 // We may not be finished yet, try to get the next chunk of wordHashes
-TreeSet wordHashes = wordHashes(wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
+TreeSet wordHashes = wordHashSet(wordHash, plasmaWordIndex.RL_WORDFILES, false, 100);
 wordHashIterator = wordHashes.iterator();
 // Make sure we don't get the same wordhash twice, but don't skip a word
 if ((wordHashIterator.hasNext())&&(!wordHash.equals(wordHashIterator.next()))) {
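This hunk and the plasmaDbImporter hunk above use the same chunk-refill pattern: walk the current chunk, and when it is exhausted request the next chunk starting at the last hash seen, taking care neither to process that hash twice nor to skip the one after it. A minimal sketch of that pattern, where fetchChunk is a hypothetical stand-in for wordHashSet and the hashes are invented:

import java.util.Iterator;
import java.util.TreeSet;

public class ChunkedHashIterationSketch {

    // hypothetical stand-in for wordIndex.wordHashSet(startHash, resourceLevel, rot, count)
    static TreeSet<String> fetchChunk(String startHash, int count) {
        TreeSet<String> all = new TreeSet<>();
        for (char c = 'a'; c <= 'j'; c++) all.add("hash-" + c); // invented data
        TreeSet<String> chunk = new TreeSet<>();
        for (String h : all.tailSet(startHash)) {               // hashes >= startHash, inclusive
            if (chunk.size() == count) break;
            chunk.add(h);
        }
        return chunk;
    }

    public static void main(String[] args) {
        final int chunkSize = 3;
        String lastHash = "";                                    // sorts before every real hash
        Iterator<String> it = fetchChunk(lastHash, chunkSize).iterator();
        while (it.hasNext()) {
            lastHash = it.next();
            System.out.println("processing " + lastHash);
            if (!it.hasNext()) {
                // We may not be finished yet: get the next chunk starting at the last hash.
                TreeSet<String> next = fetchChunk(lastHash, chunkSize);
                // The last hash comes back as the first element; drop it so it is not
                // processed twice, but keep everything after it so no word is skipped.
                next.remove(lastHash);
                it = next.iterator();
            }
        }
    }
}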
