Fixed several details concerning network switching, the default password, random password generation, and localhost authentication

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4830 6c8d7289-2bf4-0310-a012-ef5d649a1542
pull/1/head
orbiter 17 years ago
parent d9d1c8de70
commit 4229cd275c

@ -24,7 +24,9 @@
::
<p class="error">Username too short. The username must be at least 4 characters long.</p>
#(/error)#
#(commitIntranetWarning)#::<div class="error">With an intranet indexing configuration it is not allowed to authenticate automatically from localhost!</div>#(/commitIntranetWarning)#
#(passwordNotSetWarning)#::<div class="error">No password is set for the administration account. Please define a password for the admin account.</div>#(/passwordNotSetWarning)#
<fieldset><legend>Admin Account</legend>
<form action="ConfigAccounts_p.html" method="post">
<fieldset>

@ -73,21 +73,43 @@ public class ConfigAccounts_p {
String user = (post == null) ? "" : (String) post.get("adminuser", "");
String pw1 = (post == null) ? "" : (String) post.get("adminpw1", "");
String pw2 = (post == null) ? "" : (String) post.get("adminpw2", "");
sb.setConfig("adminAccountForLocalhost", localhostAccess);
// if localhost access is configured, check whether a local password is given
// if not, set a random password
if (post != null && localhostAccess && env.getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
// make a 'random' password
env.setConfig(httpd.ADMIN_ACCOUNT_B64MD5, "0000" + serverCodings.encodeMD5Hex(System.getProperties().toString() + System.currentTimeMillis()));
env.setConfig("adminAccount", "");
}
// may be overwritten if new password is given
if ((user.length() > 0) && (pw1.length() > 3) && (pw1.equals(pw2))) {
// check passed. set account:
env.setConfig(httpd.ADMIN_ACCOUNT_B64MD5, serverCodings.encodeMD5Hex(kelondroBase64Order.standardCoder.encodeString(user + ":" + pw1)));
env.setConfig("adminAccount", "");
}
if (localhostAccess) {
if (sb.acceptLocalURLs) {
// in this case it is not allowed to use a localhostAccess option
prop.put("commitIntranetWarning", 1);
localhostAccess = false;
sb.setConfig("adminAccountForLocalhost", false);
} else {
sb.setConfig("adminAccountForLocalhost", true);
// if localhost access is configured, check whether a local password is given
// if not, set a random password
if (post != null && env.getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
// make a 'random' password
env.setConfig(httpd.ADMIN_ACCOUNT_B64MD5, "0000" + serverCodings.encodeMD5Hex(System.getProperties().toString() + System.currentTimeMillis()));
env.setConfig("adminAccount", "");
}
}
} else {
sb.setConfig("adminAccountForLocalhost", false);
if (env.getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").startsWith("0000")) {
// make sure that the user can still use the interface after a random password was set
env.setConfig(httpd.ADMIN_ACCOUNT_B64MD5, "");
}
}
}
if (env.getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0 && !env.getConfigBool("adminAccountForLocalhost", false)) {
prop.put("passwordNotSetWarning", 1);
}
prop.put("localhost.checked", (localhostAccess) ? 1 : 0);
prop.put("account.checked", (localhostAccess) ? 0 : 1);
prop.put("statusPassword", localhostAccess ? "0" : "1");

@ -50,6 +50,7 @@
#(commitRobinson)#::<div class="commit">For Robinson Mode, index distribution and receive is switched off.</div>#(/commitRobinson)#
#(commitRobinsonWithRemoteIndexing)#::<div class="commit">This Robinson Mode switches remote indexing on, but limits targets to peers within the same cluster. Remote indexing requests from peers within the same cluster are accepted.</div>#(/commitRobinsonWithRemoteIndexing)#
#(commitRobinsonWithoutRemoteIndexing)#::<div class="commit">This Robinson Mode does not allow any remote indexing (neither requests remote indexing, nor accepts it).</div>#(/commitRobinsonWithoutRemoteIndexing)#
#(commitPasswordWarning)#::<div class="error">With this configuration it is not allowed to authenticate automatically from localhost! Please open the <a href="ConfigAccounts_p.html">Account Configuration</a> and set a new password.</div>#(/commitPasswordWarning)#
<form name="NetworkForm" method="post" action="ConfigNetwork_p.html" enctype="multipart/form-data" accept-charset="UTF-8">
<fieldset>
<legend>

@ -29,8 +29,8 @@ import java.io.File;
import java.util.HashSet;
import de.anomic.http.httpHeader;
import de.anomic.http.httpd;
import de.anomic.plasma.plasmaSwitchboard;
import de.anomic.plasma.plasmaWordIndex;
import de.anomic.server.serverBusyThread;
import de.anomic.server.serverCodings;
import de.anomic.server.serverFileUtils;
@ -60,25 +60,11 @@ public class ConfigNetwork_p {
} else {
// shut down old network and index, start up new network and index
commit = 1;
// pause crawls
boolean lcp = sb.crawlJobIsPaused(plasmaSwitchboard.CRAWLJOB_LOCAL_CRAWL);
if (!lcp) sb.pauseCrawlJob(plasmaSwitchboard.CRAWLJOB_LOCAL_CRAWL);
boolean rcp = sb.crawlJobIsPaused(plasmaSwitchboard.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
if (!rcp) sb.pauseCrawlJob(plasmaSwitchboard.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
// trigger online caution
sb.proxyLastAccess = System.currentTimeMillis() + 60000; // at least 1 minute online caution to prevent unnecessary action on database meanwhile
// switch the networks
synchronized (sb.webIndex) {
sb.webIndex.close();
sb.setConfig("network.unit.definition", networkDefinition);
plasmaSwitchboard.overwriteNetworkDefinition(sb);
File indexPrimaryPath = sb.getConfigPath(plasmaSwitchboard.INDEX_PRIMARY_PATH, plasmaSwitchboard.INDEX_PATH_DEFAULT);
File indexSecondaryPath = (sb.getConfig(plasmaSwitchboard.INDEX_SECONDARY_PATH, "").length() == 0) ? indexPrimaryPath : new File(sb.getConfig(plasmaSwitchboard.INDEX_SECONDARY_PATH, ""));
sb.webIndex = new plasmaWordIndex(sb.getConfig("network.unit.name", ""), sb.getLog(), indexPrimaryPath, indexSecondaryPath);
sb.switchNetwork(networkDefinition);
// check if the password is given
if (sb.getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
prop.put("commitPasswordWarning", "1");
}
// start up crawl jobs again
if (lcp) sb.continueCrawlJob(plasmaSwitchboard.CRAWLJOB_LOCAL_CRAWL);
if (rcp) sb.continueCrawlJob(plasmaSwitchboard.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
}
}

@ -42,22 +42,22 @@ public final class indexRAMRI implements indexRI, indexRIReader {
private final kelondroMScoreCluster<String> hashScore;
private final kelondroMScoreCluster<String> hashDate;
private long initTime;
private int cacheMaxCount;
public int cacheReferenceCountLimit;
public long cacheReferenceAgeLimit;
private int cacheEntityMaxCount; // the maximum number of cache slots for RWI entries
public int cacheReferenceCountLimit; // the maximum number of references to a single RWI entity
public long cacheReferenceAgeLimit; // the maximum age (= time not changed) of a RWI entity
private final serverLog log;
private File indexHeapFile;
private indexContainerHeap heap;
@SuppressWarnings("unchecked")
public indexRAMRI(File databaseRoot, kelondroRow payloadrow, int wCacheReferenceCountLimitInit, long wCacheReferenceAgeLimitInit, String oldArrayName, String newHeapName, serverLog log) {
public indexRAMRI(File databaseRoot, kelondroRow payloadrow, int entityCacheMaxSize, int wCacheReferenceCountLimitInit, long wCacheReferenceAgeLimitInit, String oldArrayName, String newHeapName, serverLog log) {
// creates a new index cache
// the cache has a back-end where indexes that do not fit in the cache are flushed
this.hashScore = new kelondroMScoreCluster<String>();
this.hashDate = new kelondroMScoreCluster<String>();
this.initTime = System.currentTimeMillis();
this.cacheMaxCount = 10000;
this.cacheEntityMaxCount = entityCacheMaxSize;
this.cacheReferenceCountLimit = wCacheReferenceCountLimitInit;
this.cacheReferenceAgeLimit = wCacheReferenceAgeLimitInit;
this.log = log;
@ -109,11 +109,11 @@ public final class indexRAMRI implements indexRI, indexRIReader {
}
public void setMaxWordCount(int maxWords) {
this.cacheMaxCount = maxWords;
this.cacheEntityMaxCount = maxWords;
}
public int getMaxWordCount() {
return this.cacheMaxCount;
return this.cacheEntityMaxCount;
}
public int size() {

@ -118,8 +118,8 @@ public final class Identificator {
}
}
//calculate percentage
Iterator iter = testStat.keySet().iterator();
// calculate percentage
Iterator<Character> iter = testStat.keySet().iterator();
Character character;
Character maxChar = null;
float value = 0;

@ -853,10 +853,15 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
this.workPath = getConfigPath(WORK_PATH, WORK_PATH_DEFAULT);
this.log.logConfig("Work Path: " + this.workPath.toString());
// set a high maximum cache size to current size; this is adopted later automatically
int wordCacheMaxCount = Math.max((int) getConfigLong(WORDCACHE_INIT_COUNT, 30000),
(int) getConfigLong(WORDCACHE_MAX_COUNT, 20000));
setConfig(WORDCACHE_MAX_COUNT, Integer.toString(wordCacheMaxCount));
// start indexing management
log.logConfig("Starting Indexing Management");
String networkName = getConfig("network.unit.name", "");
webIndex = new plasmaWordIndex(networkName, log, indexPrimaryPath, indexSecondaryPath);
webIndex = new plasmaWordIndex(networkName, log, indexPrimaryPath, indexSecondaryPath, wordCacheMaxCount);
crawlResults = new ResultURLs();
// start yacy core
@ -984,12 +989,6 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
//Init bookmarks DB
initBookmarks();
// set a high maximum cache size to current size; this is adopted later automatically
int wordCacheMaxCount = Math.max((int) getConfigLong(WORDCACHE_INIT_COUNT, 30000),
(int) getConfigLong(WORDCACHE_MAX_COUNT, 20000));
setConfig(WORDCACHE_MAX_COUNT, Integer.toString(wordCacheMaxCount));
webIndex.setMaxWordCount(wordCacheMaxCount);
// set a maximum amount of memory for the caches
// long memprereq = Math.max(getConfigLong(INDEXER_MEMPREREQ, 0), wordIndex.minMem());
// setConfig(INDEXER_MEMPREREQ, memprereq);
@ -1251,6 +1250,43 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
}
/**
 * Switches this peer over to a different network: pauses running crawls,
 * closes the current web index, re-creates it under the new network
 * definition, and resumes the crawls afterwards. Also revokes automatic
 * localhost authorization when the new configuration accepts local URLs.
 *
 * @param networkDefinition location of the new network definition to apply
 */
public void switchNetwork(String networkDefinition) {
// pause crawls so no crawl thread touches the index while it is being swapped
boolean lcp = crawlJobIsPaused(CRAWLJOB_LOCAL_CRAWL);
if (!lcp) pauseCrawlJob(CRAWLJOB_LOCAL_CRAWL);
boolean rcp = crawlJobIsPaused(CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
if (!rcp) pauseCrawlJob(CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
// trigger online caution
proxyLastAccess = System.currentTimeMillis() + 60000; // at least 1 minute online caution to prevent unnecessary action on database meanwhile
// switch the networks: close the old index and build a new one for the new network name
synchronized (this.webIndex) {
this.webIndex.close();
setConfig("network.unit.definition", networkDefinition);
overwriteNetworkDefinition(this);
File indexPrimaryPath = getConfigPath(INDEX_PRIMARY_PATH, INDEX_PATH_DEFAULT);
File indexSecondaryPath = (getConfig(INDEX_SECONDARY_PATH, "").length() == 0) ? indexPrimaryPath : new File(getConfig(INDEX_SECONDARY_PATH, ""));
int wordCacheMaxCount = (int) getConfigLong(WORDCACHE_MAX_COUNT, 20000);
this.webIndex = new plasmaWordIndex(getConfig("network.unit.name", ""), getLog(), indexPrimaryPath, indexSecondaryPath, wordCacheMaxCount);
}
// start up crawl jobs again
if (lcp) continueCrawlJob(CRAWLJOB_LOCAL_CRAWL);
if (rcp) continueCrawlJob(CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
// check status of account configuration: when local url crawling is allowed, it is not allowed
// that an automatic authorization of localhost is done, because in this case crawls from local
// addresses are blocked to prevent attack scenarios where remote pages contain links to localhost
// addresses that can steer a YaCy peer
if ((this.acceptLocalURLs) && (getConfigBool("adminAccountForLocalhost", false))) {
setConfig("adminAccountForLocalhost", false);
if (getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").startsWith("0000")) {
// the password was set automatically with a random value;
// it must be removed here so the user is not locked out of the interface
setConfig(httpd.ADMIN_ACCOUNT_B64MD5, "");
// after this a message must be generated to alert the user to set a new password
log.logInfo("RANDOM PASSWORD REMOVED! User must set a new password");
}
}
}
public void initMessages() {
this.log.logConfig("Starting Message Board");
File messageDbFile = new File(workPath, DBFILE_MESSAGE);
@ -1777,7 +1813,7 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
boolean hasDoneSomething = false;
// set a random password if no password is configured
if (getConfigBool("adminAccountForLocalhost", false) && getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
if (!this.acceptLocalURLs && getConfigBool("adminAccountForLocalhost", false) && getConfig(httpd.ADMIN_ACCOUNT_B64MD5, "").length() == 0) {
// make a 'random' password
setConfig(httpd.ADMIN_ACCOUNT_B64MD5, "0000" + serverCodings.encodeMD5Hex(System.getProperties().toString() + System.currentTimeMillis()));
setConfig("adminAccount", "");

@ -73,8 +73,8 @@ public final class plasmaWordIndex implements indexRI {
// environment constants
public static final long wCacheMaxAge = 1000 * 60 * 30; // milliseconds; 30 minutes
public static final int wCacheMaxChunk = 800; // maximum number of references for each urlhash
public static final int lowcachedivisor = 1200;
public static final int wCacheMaxChunk = 800; // maximum number of references for each urlhash
public static final int lowcachedivisor = 1200;
public static final int maxCollectionPartition = 7; // should be 7
@ -96,7 +96,7 @@ public final class plasmaWordIndex implements indexRI {
public yacySeedDB seedDB;
public yacyNewsPool newsPool;
private File primaryRoot, secondaryRoot;
public IndexingStack queuePreStack;
public IndexingStack queuePreStack;
public CrawlProfile profilesActiveCrawls, profilesPassiveCrawls;
public CrawlProfile.entry defaultProxyProfile;
public CrawlProfile.entry defaultRemoteProfile;
@ -104,7 +104,7 @@ public final class plasmaWordIndex implements indexRI {
public CrawlProfile.entry defaultMediaSnippetLocalProfile, defaultMediaSnippetGlobalProfile;
private File queuesRoot;
public plasmaWordIndex(String networkName, serverLog log, File indexPrimaryRoot, File indexSecondaryRoot) {
public plasmaWordIndex(String networkName, serverLog log, File indexPrimaryRoot, File indexSecondaryRoot, int entityCacheMaxSize) {
this.log = log;
this.primaryRoot = new File(indexPrimaryRoot, networkName);
this.secondaryRoot = new File(indexSecondaryRoot, networkName);
@ -127,8 +127,8 @@ public final class plasmaWordIndex implements indexRI {
File textindexcache = new File(indexPrimaryTextLocation, "RICACHE");
if (!(textindexcache.exists())) textindexcache.mkdirs();
this.dhtOutCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, wCacheMaxChunk, wCacheMaxAge, "dump1.array", "index.dhtout.heap", log);
this.dhtInCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, wCacheMaxChunk, wCacheMaxAge, "dump2.array", "index.dhtin.heap", log);
this.dhtOutCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, entityCacheMaxSize, wCacheMaxChunk, wCacheMaxAge, "dump1.array", "index.dhtout.heap", log);
this.dhtInCache = new indexRAMRI(textindexcache, indexRWIRowEntry.urlEntryRow, entityCacheMaxSize, wCacheMaxChunk, wCacheMaxAge, "dump2.array", "index.dhtin.heap", log);
// create collections storage path
File textindexcollections = new File(indexPrimaryTextLocation, "RICOLLECTION");

@ -177,10 +177,12 @@ public abstract class serverAbstractSwitch<E> implements serverSwitch<E> {
if (accessPath == null) accessPath="NULL";
TreeMap<Long, String> access = accessTracker.get(host);
if (access == null) access = new TreeMap<Long, String>();
access.put(new Long(System.currentTimeMillis()), accessPath);
// write back to tracker
accessTracker.put(host, clearTooOldAccess(access));
synchronized (access) {
access.put(new Long(System.currentTimeMillis()), accessPath);
// write back to tracker
accessTracker.put(host, clearTooOldAccess(access));
}
}
public TreeMap<Long, String> accessTrack(String host) {
@ -189,15 +191,16 @@ public abstract class serverAbstractSwitch<E> implements serverSwitch<E> {
TreeMap<Long, String> access = accessTracker.get(host);
if (access == null) return null;
// clear too old entries
if ((access = clearTooOldAccess(access)).size() != access.size()) {
// write back to tracker
if (access.size() == 0) {
accessTracker.remove(host);
} else {
accessTracker.put(host, access);
synchronized (access) {
if ((access = clearTooOldAccess(access)).size() != access.size()) {
// write back to tracker
if (access.size() == 0) {
accessTracker.remove(host);
} else {
accessTracker.put(host, access);
}
}
}
return access;
}

@ -643,7 +643,7 @@ public final class yacy {
int cacheMem = (int)(serverMemory.max() - serverMemory.total());
if (cacheMem < 2048000) throw new OutOfMemoryError("Not enough memory available to start clean up.");
plasmaWordIndex wordIndex = new plasmaWordIndex(networkName, log, indexPrimaryRoot, indexSecondaryRoot);
plasmaWordIndex wordIndex = new plasmaWordIndex(networkName, log, indexPrimaryRoot, indexSecondaryRoot, 10000);
Iterator<indexContainer> indexContainerIterator = wordIndex.wordContainers("AAAAAAAAAAAA", false, false);
long urlCounter = 0, wordCounter = 0;
@ -834,7 +834,7 @@ public final class yacy {
try {
Iterator<indexContainer> indexContainerIterator = null;
if (resource.equals("all")) {
WordIndex = new plasmaWordIndex("freeworld", log, indexPrimaryRoot, indexSecondaryRoot);
WordIndex = new plasmaWordIndex("freeworld", log, indexPrimaryRoot, indexSecondaryRoot, 10000);
indexContainerIterator = WordIndex.wordContainers(wordChunkStartHash, false, false);
}
int counter = 0;

Loading…
Cancel
Save