From c7a614830af9d6cca94a0e7ec9de0b2315602c4a Mon Sep 17 00:00:00 2001
From: orbiter
Date: Fri, 15 Jun 2007 17:45:49 +0000
Subject: [PATCH] several bugfixes

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@3899 6c8d7289-2bf4-0310-a012-ef5d649a1542
---
 htroot/AccessTracker_p.java                     | 12 +++++++++---
 htroot/CrawlStartExpert_p.html                  |  2 +-
 htroot/CrawlStartExpert_p.java                  |  2 +-
 htroot/CrawlStartSimple_p.html                  |  2 +-
 htroot/IndexControl_p.java                      | 16 +++++++++++++---
 source/de/anomic/plasma/plasmaCrawlProfile.java |  4 ++--
 source/de/anomic/plasma/plasmaCrawlStacker.java |  4 ++--
 source/de/anomic/plasma/plasmaSwitchboard.java  |  7 +++++++
 source/de/anomic/server/serverMemory.java       | 10 ++++------
9 files changed, 40 insertions(+), 19 deletions(-)
diff --git a/htroot/AccessTracker_p.java b/htroot/AccessTracker_p.java
index 7a6874269..169c2f0af 100644
--- a/htroot/AccessTracker_p.java
+++ b/htroot/AccessTracker_p.java
@@ -72,7 +72,7 @@ public class AccessTracker_p {
prop.put("page_list_" + entCount + "_countHour", access.tailMap(new Long(System.currentTimeMillis() - 1000 * 60 * 60)).size());
entCount++;
}
- } catch (ConcurrentModificationException e) {} // we dont want to serialize this
+ } catch (ConcurrentModificationException e) {} // we dont want to synchronize this
prop.put("page_list", entCount);
prop.put("page_num", entCount);
}
@@ -84,6 +84,7 @@ public class AccessTracker_p {
if (host.length() > 0) {
access = switchboard.accessTrack(host);
if (access != null) {
+ try {
Iterator ii = access.entrySet().iterator();
while (ii.hasNext()) {
entry = (Map.Entry) ii.next();
@@ -91,13 +92,15 @@ public class AccessTracker_p {
prop.put("page_list_" + entCount + "_date", yacyCore.universalDateShortString(new Date(((Long) entry.getKey()).longValue())));
prop.put("page_list_" + entCount + "_path", (String) entry.getValue());
entCount++;
- }
+ }} catch (ConcurrentModificationException e) {} // we dont want to synchronize this
+
}
} else {
Iterator i = switchboard.accessHosts();
while ((entCount < maxCount) && (i.hasNext())) {
host = (String) i.next();
access = switchboard.accessTrack(host);
+ try {
Iterator ii = access.entrySet().iterator();
while (ii.hasNext()) {
entry = (Map.Entry) ii.next();
@@ -105,7 +108,8 @@ public class AccessTracker_p {
prop.put("page_list_" + entCount + "_date", yacyCore.universalDateShortString(new Date(((Long) entry.getKey()).longValue())));
prop.put("page_list_" + entCount + "_path", (String) entry.getValue());
entCount++;
- }
+ }} catch (ConcurrentModificationException e) {} // we dont want to synchronize this
+
}
}
prop.put("page_list", entCount);
@@ -149,6 +153,7 @@ public class AccessTracker_p {
TreeSet handles;
int entCount = 0;
Map.Entry entry;
+ try {
while ((entCount < maxCount) && (i.hasNext())) {
entry = (Map.Entry) i.next();
host = (String) entry.getKey();
@@ -177,6 +182,7 @@ public class AccessTracker_p {
// next
entCount++;
}
+ } catch (ConcurrentModificationException e) {} // we dont want to synchronize this
prop.put("page_list", entCount);
prop.put("page_num", entCount);
prop.put("page_total", (page == 3) ? switchboard.localSearches.size() : switchboard.remoteSearches.size());
diff --git a/htroot/CrawlStartExpert_p.html b/htroot/CrawlStartExpert_p.html
index 60e0fa073..f5ca9628b 100644
--- a/htroot/CrawlStartExpert_p.html
+++ b/htroot/CrawlStartExpert_p.html
@@ -16,7 +16,7 @@
You can define URLs as start points for Web page crawling and start crawling here. "Crawling" means that YaCy will download the given website, extract all links in it and then download the content behind these links. This is repeated as long as specified under "Crawling Depth".
-
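
The CrawlStartExpert_p.html text above describes the depth-limited crawl loop: download the start page, extract its links, download the pages behind those links, and repeat until the configured Crawling Depth is reached. A minimal sketch of such a loop follows, assuming a crude regex-based link extractor; it illustrates the concept only and is not YaCy's crawler, and the DepthCrawler name is hypothetical.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DepthCrawler {

    // deliberately crude: matches absolute http(s) hrefs only
    private static final Pattern HREF =
            Pattern.compile("href=\"(https?://[^\"]+)\"", Pattern.CASE_INSENSITIVE);

    public static void crawl(String start, int maxDepth) throws Exception {
        Queue<String[]> queue = new LinkedList<String[]>(); // {url, depth}
        Set<String> seen = new HashSet<String>();
        queue.add(new String[] { start, "0" });
        seen.add(start);
        while (!queue.isEmpty()) {
            String[] job = queue.poll();
            int depth = Integer.parseInt(job[1]);
            String html = fetch(job[0]);
            System.out.println("depth " + depth + ": " + job[0]);
            if (depth >= maxDepth) continue; // Crawling Depth reached
            Matcher m = HREF.matcher(html);
            while (m.find()) {
                String link = m.group(1);
                if (seen.add(link)) {
                    queue.add(new String[] { link, String.valueOf(depth + 1) });
                }
            }
        }
    }

    private static String fetch(String url) throws Exception {
        StringBuilder html = new StringBuilder();
        BufferedReader in = new BufferedReader(
                new InputStreamReader(new URL(url).openStream(), "UTF-8"));
        for (String line; (line = in.readLine()) != null; ) {
            html.append(line).append('\n');
        }
        in.close();
        return html.toString();
    }

    public static void main(String[] args) throws Exception {
        crawl(args.length > 0 ? args[0] : "http://example.com/", 1);
    }
}
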