remove redundant null checks

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@5065 6c8d7289-2bf4-0310-a012-ef5d649a1542
danielr 17 years ago
parent f9a715dc33
commit 3c68905540

@@ -424,8 +424,7 @@ public class SettingsAck_p {
         for (int i=0; i<configOptions.length; i++) {
             final String newSettings = post.get(configOptions[i],"");
             final String oldSettings = env.getConfig(configOptions[i],"");
-            // FIXME should this be: nC = nC && newSettings.equals()
-            // or (current): nC = nC & newSettings.equals()
+            // bitwise AND with boolean is same as logic AND
             nothingChanged &= newSettings.equals(oldSettings);
             if (!nothingChanged) {
                 env.setConfig(configOptions[i],newSettings);
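
The resolved FIXME above turns on the fact that, for boolean operands, '&' is an ordinary logical AND; it only differs from '&&' in that it never short-circuits. A minimal stand-alone sketch of the accumulation pattern, using made-up setting values rather than the real configOptions:

public class AndAccumulationDemo {
    public static void main(final String[] args) {
        // hypothetical old/new setting values, for illustration only
        final String[] oldSettings = {"8090", "true"};
        final String[] newSettings = {"8090", "false"};
        boolean nothingChanged = true;
        for (int i = 0; i < oldSettings.length; i++) {
            // '&=' on booleans gives the same result as '&&' would,
            // it just always evaluates the right-hand side
            nothingChanged &= newSettings[i].equals(oldSettings[i]);
        }
        System.out.println(nothingChanged); // false, because the second value differs
    }
}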

@@ -322,7 +322,7 @@ public class CrawlQueues {
         if (seed == null) return false;
         // we know a peer which should provide remote crawl entries. load them now.
-        final RSSFeed feed = (seed == null) ? null : yacyClient.queryRemoteCrawlURLs(sb.webIndex.seedDB, seed, 20);
+        final RSSFeed feed = yacyClient.queryRemoteCrawlURLs(sb.webIndex.seedDB, seed, 20);
         if (feed == null) return true;
         // parse the rss
         yacyURL url, referrer;
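
The CrawlQueues hunk is the commit title in its simplest form: the early return on 'seed == null' already guarantees a non-null seed, so the removed ternary could never take its null branch. A small sketch of that guard pattern, with invented names rather than the YaCy API:

public class EarlyGuardDemo {
    // stands in for the remote-crawl lookup; the name is invented for this sketch
    static String queryRemoteCrawlURLs(final String seed) {
        return "urls from " + seed;
    }

    static boolean processRemoteCrawl(final String seed) {
        if (seed == null) return false;          // early guard
        // seed is provably non-null from here on, so a
        // '(seed == null) ? null : ...' ternary would be dead code
        final String feed = queryRemoteCrawlURLs(seed);
        return feed != null;
    }

    public static void main(final String[] args) {
        System.out.println(processRemoteCrawl("peer-1")); // true
        System.out.println(processRemoteCrawl(null));     // false
    }
}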

@@ -150,16 +150,17 @@ public final class userDB {
         return entry;
     }
     /**
-     * determinate, if a user has Adminrights from a authorisation http-headerfield
-     * it tests both userDB and oldstyle adminpw.
-     * @param auth the http-headerline for authorisation
+     * determinate, if a user has Adminrights from a authorisation http-headerfield it tests both userDB and oldstyle adminpw.
+     *
+     * @param auth
+     *            the http-headerline for authorisation
      */
-    public boolean hasAdminRight(final String auth, final String ip, final String cookies){
-        final Entry entry=getUser(auth, ip, cookies);
-        if(entry != null)
-            return entry.hasAdminRight();
-        else if(entry != null && cookieAdminAuth(cookies))
-            return entry.hasAdminRight();
+    public boolean hasAdminRight(final String auth, final String ip, final String cookies) {
+        final Entry entry = getUser(auth, ip, cookies);
+        if (entry != null)
+            return entry.hasAdminRight();
+        // else if(entry != null && cookieAdminAuth(cookies))
+        //     return entry.hasAdminRight();
         else
             return false;
     }
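
The branch commented out in hasAdminRight was unreachable: control only gets past the first 'if (entry != null)' when entry is null, so a second test of 'entry != null' can never succeed. A compact illustration, using a hypothetical stand-in for the Entry type:

public class DeadBranchDemo {
    interface Entry { boolean hasAdminRight(); }

    static boolean hasAdminRight(final Entry entry) {
        if (entry != null)
            return entry.hasAdminRight();
        // an 'else if (entry != null && ...)' here could never be true:
        // this point is only reached when entry is null
        return false;
    }

    public static void main(final String[] args) {
        System.out.println(hasAdminRight(() -> true)); // true
        System.out.println(hasAdminRight(null));       // false
    }
}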

@@ -73,9 +73,9 @@ public class kelondroBLOBArray implements kelondroBLOB {
         // check existence of the heap directory
         if (heapLocation.exists()) {
-            if (!heapLocation.isDirectory()) throw new IOException("the BLOBArray directory " + heapLocation.toString() + " does not exist (is blocked by a file with same name");
+            if (!heapLocation.isDirectory()) throw new IOException("the BLOBArray directory " + heapLocation.toString() + " does not exist (is blocked by a file with same name)");
         } else {
-            heapLocation.mkdirs();
+            if(!heapLocation.mkdirs()) throw new IOException("the BLOBArray directory " + heapLocation.toString() + " does not exist (can not be created)");
         }
         // register all blob files inside this directory
@@ -91,7 +91,7 @@ public class kelondroBLOBArray implements kelondroBLOB {
             } catch (ParseException e) {continue;}
             f = new File(heapLocation, files[i]);
             oneBlob = new kelondroBLOBHeap(f, keylength, ordering);
-            sortedItems.put(new Long(d.getTime()), new blobItem(d, f, oneBlob));
+            sortedItems.put(Long.valueOf(d.getTime()), new blobItem(d, f, oneBlob));
             }
         }
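
The new Long(...) to Long.valueOf(...) swap here (and again in plasmaWordIndex below) avoids allocating a fresh wrapper object on every call; valueOf may return a cached instance, and the JDK guarantees a cache at least for values from -128 to 127. A quick demonstration of the difference:

public class ValueOfDemo {
    public static void main(final String[] args) {
        final Long a = Long.valueOf(100);
        final Long b = Long.valueOf(100);
        System.out.println(a == b);                          // true: the cached instance is reused
        System.out.println(new Long(100) == new Long(100));  // false: two separate allocations
        System.out.println(a.equals(b));                     // true: equals() is the right comparison either way
    }
}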

@@ -263,10 +263,10 @@ public class kelondroRowCollection {
         return b;
     }
-    public final kelondroRow.Entry get(final int index, final boolean clone) {
+    public synchronized final kelondroRow.Entry get(final int index, final boolean clone) {
         assert (index >= 0) : "get: access with index " + index + " is below zero";
         assert (index < chunkcount) : "get: access with index " + index + " is above chunkcount " + chunkcount + "; sortBound = " + sortBound;
-        assert (index * rowdef.objectsize < chunkcache.length);
+        assert (chunkcache != null && index * rowdef.objectsize < chunkcache.length);
         assert sortBound <= chunkcount : "sortBound = " + sortBound + ", chunkcount = " + chunkcount;
         if ((chunkcache == null) || (rowdef == null)) return null; // case may appear during shutdown
         kelondroRow.Entry entry;
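
The extra 'chunkcache != null &&' in the assert matters when assertions are enabled: without the guard, a null chunkcache would surface as a NullPointerException thrown while evaluating the assert condition, instead of a clean AssertionError. A tiny sketch of that behaviour (run with java -ea):

public class AssertGuardDemo {
    static int[] chunkcache = null; // simulates a not-yet-initialised cache

    public static void main(final String[] args) {
        // unguarded form: 'assert chunkcache.length > 0;' would throw a
        // NullPointerException while evaluating the condition;
        // the guarded form below fails with an AssertionError instead
        assert chunkcache != null && chunkcache.length > 0 : "chunkcache not initialised";
        System.out.println("assertions disabled or cache present");
    }
}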

@@ -115,14 +115,12 @@ public final class plasmaSearchRankingProcess {
         // join and exclude the local result
         timer = System.currentTimeMillis();
         final indexContainer index =
-            (this.localSearchContainerMaps == null) ?
-                plasmaWordIndex.emptyContainer(null, 0) :
-                indexContainer.joinExcludeContainers(
-                    this.localSearchContainerMaps[0].values(),
-                    this.localSearchContainerMaps[1].values(),
-                    query.maxDistance);
+            indexContainer.joinExcludeContainers(
+                this.localSearchContainerMaps[0].values(),
+                this.localSearchContainerMaps[1].values(),
+                query.maxDistance);
         serverProfiling.update("SEARCH", new plasmaProfiling.searchEvent(query.id(true), plasmaSearchEvent.JOIN, index.size(), System.currentTimeMillis() - timer));
-        if ((index == null) || (index.size() == 0)) {
+        if (index.size() == 0) {
             return;
         }
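
In the ranking process the ternary's null branch and the matching 'index == null' test go away together: assuming the container maps are non-null by the time this runs and joinExcludeContainers always hands back a container object (possibly empty), only the size check carries information. A generic sketch of that simplification, with invented names rather than the YaCy classes:

import java.util.ArrayList;
import java.util.List;

public class JoinDemo {
    // stands in for a join that returns an empty result instead of null
    static List<String> joinExclude(final List<String> include, final List<String> exclude) {
        final List<String> result = new ArrayList<>(include);
        result.removeAll(exclude);
        return result; // never null
    }

    public static void main(final String[] args) {
        final List<String> index = joinExclude(List.of("a", "b"), List.of("a", "b"));
        // no null check needed; testing for emptiness is enough
        if (index.size() == 0) {
            System.out.println("nothing to rank");
        }
    }
}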

@@ -424,7 +424,7 @@ public final class plasmaWordIndex implements indexRI {
         // flush elements that are too big. This flushing depends on the fact that the flush rule
         // selects the biggest elements first for flushing. If it does not for any reason, the following
         // loop would not terminate.
-        serverProfiling.update("wordcache", new Long(cs));
+        serverProfiling.update("wordcache", Long.valueOf(cs));
         // To ensure termination an additional counter is used
         int l = 0;
         while ((l++ < 100) && (theCache.maxURLinCache() > wCacheMaxChunk)) {
@@ -435,7 +435,7 @@ public final class plasmaWordIndex implements indexRI {
                 (serverMemory.available() < collections.minMem())) {
             flushCache(theCache, Math.min(theCache.size() - theCache.getMaxWordCount() + 1, theCache.size()));
         }
-        if (cacheSize() != cs) serverProfiling.update("wordcache", new Long(cacheSize()));
+        if (cacheSize() != cs) serverProfiling.update("wordcache", Long.valueOf(cacheSize()));
     }
 }
