From 40d7f485f3ec30dd21b42ac0cdf13ff4a2a8a1f8 Mon Sep 17 00:00:00 2001
From: orbiter
Date: Wed, 4 Jun 2008 22:24:00 +0000
Subject: [PATCH] - fixed several NPE bugs - fixed losing of own seed hash
(hopefully) - fixed a bug with crawl starts beginning with (bookmark) files
- added better IP recognition during hello process
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4882 6c8d7289-2bf4-0310-a012-ef5d649a1542
---
htroot/CrawlStart_p.html | 2 +-
htroot/WatchCrawler_p.java | 2 +-
htroot/yacy/hello.java | 19 ++++++++-------
htroot/yacy/message.java | 2 +-
htroot/yacy/search.java | 2 +-
.../de/anomic/plasma/plasmaDHTTransfer.java | 1 +
.../de/anomic/plasma/plasmaSnippetCache.java | 5 ++--
.../de/anomic/plasma/plasmaSwitchboard.java | 2 +-
source/de/anomic/yacy/yacyClient.java | 8 +++----
source/de/anomic/yacy/yacyCore.java | 4 ++--
source/de/anomic/yacy/yacyPeerActions.java | 2 +-
source/de/anomic/yacy/yacySeed.java | 24 ++++++++++++-------
source/de/anomic/yacy/yacySeedDB.java | 16 ++++++-------
13 files changed, 48 insertions(+), 41 deletions(-)
diff --git a/htroot/CrawlStart_p.html b/htroot/CrawlStart_p.html
index 8aeb5cc8d..2531e6e29 100644
--- a/htroot/CrawlStart_p.html
+++ b/htroot/CrawlStart_p.html
@@ -16,7 +16,7 @@
You can define URLs as start points for Web page crawling and start crawling here. "Crawling" means that YaCy will download the given website, extract all links in it and then download the content behind these links. This is repeated as long as specified under "Crawling Depth".
-