- inserted the request object into the response object, which now carries it instead of generating new objects (a short sketch of the pattern follows below)

- fixed a problem with the crawler that was introduced in SVN 6216

git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@6222 6c8d7289-2bf4-0310-a012-ef5d649a1542
orbiter 16 years ago
parent ca72ed7526
commit b332dfad67
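For context, here is a minimal sketch of the delegation pattern this commit introduces. The class and method names below (RequestSketch, ResponseSketch) are illustrative placeholders, not the actual YaCy classes or signatures: the idea is that the response keeps the originating request and delegates url/name/depth/initiator to it instead of copying those fields into new objects.

// Minimal sketch, assuming simplified placeholder types; the real YaCy
// Request/Response classes have more fields and different constructors.
class RequestSketch {
    private final String url;
    private final String name;
    private final int depth;
    private final String initiator; // null marks proxy-initiated requests

    RequestSketch(final String url, final String name, final int depth, final String initiator) {
        this.url = url;
        this.name = name;
        this.depth = depth;
        // normalize an empty initiator to null, as the patch does in Request
        this.initiator = (initiator == null || initiator.length() == 0) ? null : initiator;
    }

    String url()       { return this.url; }
    String name()      { return this.name; }
    int depth()        { return this.depth; }
    String initiator() { return this.initiator; }
}

class ResponseSketch {
    private final RequestSketch request; // carried request instead of duplicated fields
    private byte[] content;              // the patch also renames cacheArray to content

    ResponseSketch(final RequestSketch request) {
        this.request = request;
        this.content = null;
    }

    // accessors delegate to the carried request
    String url()       { return this.request.url(); }
    String name()      { return this.request.name(); }
    int depth()        { return this.request.depth(); }
    String initiator() { return this.request.initiator(); }

    void setContent(final byte[] data) { this.content = data; }
    byte[] getContent()                { return this.content; }

    public static void main(final String[] args) {
        final RequestSketch req = new RequestSketch("http://example.org/", "example", 0, "");
        final ResponseSketch res = new ResponseSketch(req);
        res.setContent("hello".getBytes());
        // the response answers from the request it carries: depth 0, initiator null (proxy)
        System.out.println(res.url() + " depth=" + res.depth()
                + " initiator=" + res.initiator() + " size=" + res.getContent().length);
    }
}

Delegating to the carried request avoids duplicated state and keeps request and response consistent; it also explains why the Response constructors in the diff now take a Request as their first argument.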

@ -120,7 +120,7 @@ public class IndexCreateWWWLocalQueue_p {
case ANCHOR: value = entry.name(); break;
case DEPTH: value = Integer.toString(entry.depth()); break;
case INITIATOR:
value = (entry.initiator() == null) ? "proxy" : entry.initiator();
value = (entry.initiator() == null || entry.initiator().length() == 0) ? "proxy" : entry.initiator();
break;
case MODIFIED: value = daydate(entry.loaddate()); break;
default: value = null;

@ -109,7 +109,7 @@ public class Balancer {
}
}
public Request get(final String urlhash) throws IOException {
public synchronized Request get(final String urlhash) throws IOException {
assert urlhash != null;
if (urlFileIndex == null) return null; // case occurs during shutdown
final Row.Entry entry = urlFileIndex.get(urlhash.getBytes());
@ -117,7 +117,7 @@ public class Balancer {
return new Request(entry);
}
public int removeAllByProfileHandle(final String profileHandle, final long timeout) throws IOException {
public synchronized int removeAllByProfileHandle(final String profileHandle, final long timeout) throws IOException {
// removes all entries with a specific profile hash.
// this may last some time
// returns number of deletions
@ -148,7 +148,7 @@ public class Balancer {
* @return number of entries that had been removed
* @throws IOException
*/
public int remove(final HashSet<String> urlHashes) throws IOException {
public synchronized int remove(final HashSet<String> urlHashes) throws IOException {
final int s = urlFileIndex.size();
int removedCounter = 0;
for (final String urlhash: urlHashes) {
@ -191,7 +191,7 @@ public class Balancer {
return removedCounter;
}
public boolean has(final String urlhash) {
public synchronized boolean has(final String urlhash) {
return urlFileIndex.has(urlhash.getBytes());
}
@ -306,48 +306,50 @@ public class Balancer {
long sleeptime = 0;
Request crawlEntry = null;
while (this.urlFileIndex.size() > 0) {
// first simply take one of the entries in the top list, that should be one without any delay
String result = nextFromDelayed();
if (result == null && this.top.size() > 0) result = top.remove();
// check minimumDelta and if necessary force a sleep
//final int s = urlFileIndex.size();
Row.Entry rowEntry = (result == null) ? null : urlFileIndex.remove(result.getBytes());
if (rowEntry == null) {
rowEntry = urlFileIndex.removeOne();
result = (rowEntry == null) ? null : new String(rowEntry.getPrimaryKeyBytes());
}
if (rowEntry == null) {
Log.logWarning("Balancer", "removeOne() failed - size = " + this.size());
return null;
}
//assert urlFileIndex.size() + 1 == s : "urlFileIndex.size() = " + urlFileIndex.size() + ", s = " + s + ", result = " + result;
crawlEntry = new Request(rowEntry);
//Log.logInfo("Balancer", "fetched next url: " + crawlEntry.url().toNormalform(true, false));
// at this point we must check if the crawlEntry has relevancy because the crawl profile still exists
// if not: return null. A calling method must handle the null value and try again
if (profile != null && !profile.hasEntry(crawlEntry.profileHandle())) {
profileErrors++;
if (profileErrors < 20) Log.logInfo("Balancer", "no profile entry for handle " + crawlEntry.profileHandle());
return null;
}
sleeptime = Latency.waitingRemaining(crawlEntry.url(), minimumLocalDelta, minimumGlobalDelta); // this uses the robots.txt database and may cause a loading of robots.txt from the server
assert result.equals(new String(rowEntry.getPrimaryKeyBytes())) : "result = " + result + ", rowEntry.getPrimaryKeyBytes() = " + new String(rowEntry.getPrimaryKeyBytes());
assert result.equals(crawlEntry.url().hash()) : "result = " + result + ", crawlEntry.url().hash() = " + crawlEntry.url().hash();
if (this.domainStacks.size() <= 1) break;
if (delay && sleeptime > 0) {
// put that thing back to omit a delay here
this.delayed.put(new Long(System.currentTimeMillis() + sleeptime + 1), result);
this.urlFileIndex.put(rowEntry);
this.domainStacks.remove(result.substring(6));
continue;
}
break;
synchronized (this) {
while (this.urlFileIndex.size() > 0) {
// first simply take one of the entries in the top list, that should be one without any delay
String result = nextFromDelayed();
if (result == null && this.top.size() > 0) result = top.remove();
// check minimumDelta and if necessary force a sleep
//final int s = urlFileIndex.size();
Row.Entry rowEntry = (result == null) ? null : urlFileIndex.remove(result.getBytes());
if (rowEntry == null) {
rowEntry = urlFileIndex.removeOne();
result = (rowEntry == null) ? null : new String(rowEntry.getPrimaryKeyBytes());
}
if (rowEntry == null) {
Log.logWarning("Balancer", "removeOne() failed - size = " + this.size());
return null;
}
//assert urlFileIndex.size() + 1 == s : "urlFileIndex.size() = " + urlFileIndex.size() + ", s = " + s + ", result = " + result;
crawlEntry = new Request(rowEntry);
//Log.logInfo("Balancer", "fetched next url: " + crawlEntry.url().toNormalform(true, false));
// at this point we must check if the crawlEntry has relevancy because the crawl profile still exists
// if not: return null. A calling method must handle the null value and try again
if (profile != null && !profile.hasEntry(crawlEntry.profileHandle())) {
profileErrors++;
if (profileErrors < 20) Log.logInfo("Balancer", "no profile entry for handle " + crawlEntry.profileHandle());
return null;
}
sleeptime = Latency.waitingRemaining(crawlEntry.url(), minimumLocalDelta, minimumGlobalDelta); // this uses the robots.txt database and may cause a loading of robots.txt from the server
assert result.equals(new String(rowEntry.getPrimaryKeyBytes())) : "result = " + result + ", rowEntry.getPrimaryKeyBytes() = " + new String(rowEntry.getPrimaryKeyBytes());
assert result.equals(crawlEntry.url().hash()) : "result = " + result + ", crawlEntry.url().hash() = " + crawlEntry.url().hash();
if (this.domainStacks.size() <= 1) break;
if (delay && sleeptime > 0) {
// put that thing back to omit a delay here
this.delayed.put(new Long(System.currentTimeMillis() + sleeptime + 1), result);
this.urlFileIndex.put(rowEntry);
this.domainStacks.remove(result.substring(6));
continue;
}
break;
}
}
if (crawlEntry == null) return null;
@ -438,16 +440,18 @@ public class Balancer {
count = Math.min(count, top.size());
ArrayList<Request> cel = new ArrayList<Request>();
if (count == 0) return cel;
for (String n: top) {
try {
Row.Entry rowEntry = urlFileIndex.get(n.getBytes());
if (rowEntry == null) continue;
final Request crawlEntry = new Request(rowEntry);
cel.add(crawlEntry);
count--;
if (count <= 0) break;
} catch (IOException e) {
}
synchronized (this) {
for (String n: top) {
try {
Row.Entry rowEntry = urlFileIndex.get(n.getBytes());
if (rowEntry == null) continue;
final Request crawlEntry = new Request(rowEntry);
cel.add(crawlEntry);
count--;
if (count <= 0) break;
} catch (IOException e) {
}
}
}
return cel;
}

@ -277,7 +277,7 @@ public final class CrawlStacker {
// store information
final boolean local = entry.initiator().equals(peers.mySeed().hash);
final boolean proxy = (entry.initiator() == null || entry.initiator().equals("------------")) && profile.handle().equals(crawler.defaultProxyProfile.handle());
final boolean proxy = (entry.initiator() == null || entry.initiator().length() == 0 || entry.initiator().equals("------------")) && profile.handle().equals(crawler.defaultProxyProfile.handle());
final boolean remote = profile.handle().equals(crawler.defaultRemoteProfile.handle());
final boolean global =
(profile.remoteIndexing()) /* granted */ &&

@ -157,6 +157,7 @@ public class Latency {
// first check if the domain was _ever_ accessed before
String hosthash = url.hash().substring(6);
Host host = host(hosthash);
if (host == null) return 0; // no delay if host is new
// the time since last access to the domain is the basis of the remaining calculation
final long timeSinceLastAccess = (host == null) ? 0 : System.currentTimeMillis() - host.lastacc();

@ -56,17 +56,19 @@ public class FTPLoader {
maxFileSize = (int) sb.getConfigLong("crawler.ftp.maxFileSize", -1l);
}
protected Response createCacheEntry(final Request entry, final String mimeType, final Date fileDate) {
if (entry == null) return null;
protected Response createCacheEntry(final Request request, final String mimeType, final Date fileDate) {
if (request == null) return null;
httpRequestHeader requestHeader = new httpRequestHeader();
if (entry.referrerhash() != null) requestHeader.put(httpRequestHeader.REFERER, sb.getURL(entry.referrerhash()).toNormalform(true, false));
if (request.referrerhash() != null) requestHeader.put(httpRequestHeader.REFERER, sb.getURL(request.referrerhash()).toNormalform(true, false));
httpResponseHeader responseHeader = new httpResponseHeader();
responseHeader.put(httpHeader.LAST_MODIFIED, DateFormatter.formatRFC1123(fileDate));
responseHeader.put(httpHeader.CONTENT_TYPE, mimeType);
Response metadata = new Response(
entry.depth(), entry.url(), entry.name(), "OK",
requestHeader, responseHeader,
entry.initiator(), sb.crawler.profilesActiveCrawls.getEntry(entry.profileHandle()));
request,
requestHeader,
responseHeader,
"OK",
sb.crawler.profilesActiveCrawls.getEntry(request.profileHandle()));
plasmaHTCache.storeMetadata(responseHeader, metadata);
return metadata;
}
@ -245,7 +247,7 @@ public class FTPLoader {
// download the remote file
byte[] b = ftpClient.get(path);
htCache.setCacheArray(b);
htCache.setContent(b);
} else {
log.logInfo("REJECTED TOO BIG FILE with size " + size + " Bytes for URL " + entry.url().toString());
sb.crawlQueues.errorURL.newEntry(entry, this.sb.peers.mySeed().hash, new Date(), 1, "file size limit exceeded");

@ -83,16 +83,13 @@ public final class HTTPLoader {
* @param responseStatus Status-Code SPACE Reason-Phrase
* @return
*/
protected Response createCacheEntry(final Request entry, final Date requestDate, final httpRequestHeader requestHeader, final httpResponseHeader responseHeader, final String responseStatus) {
protected Response createCacheEntry(final Request request, final Date requestDate, final httpRequestHeader requestHeader, final httpResponseHeader responseHeader, final String responseStatus) {
Response metadata = new Response(
entry.depth(),
entry.url(),
entry.name(),
responseStatus,
requestHeader,
request,
requestHeader,
responseHeader,
entry.initiator(),
sb.crawler.profilesActiveCrawls.getEntry(entry.profileHandle())
responseStatus,
sb.crawler.profilesActiveCrawls.getEntry(request.profileHandle())
);
plasmaHTCache.storeMetadata(responseHeader, metadata);
return metadata;
@ -193,7 +190,7 @@ public final class HTTPLoader {
throw new IOException("REJECTED URL " + entry.url() + " because file size '" + contentLength + "' exceeds max filesize limit of " + maxFileSize + " bytes. (GET)");
}
htCache.setCacheArray(responseBody);
htCache.setContent(responseBody);
return htCache;
/*

@ -107,10 +107,9 @@ public class Request extends serverProcessorJob {
) {
// create new entry and store it into database
assert url != null;
assert initiator != null;
assert profileHandle == null || profileHandle.length() == yacySeedDB.commonHashLength : profileHandle + " != " + yacySeedDB.commonHashLength;
url.removeRef(); // remove anchor reference
this.initiator = initiator;
this.initiator = (initiator == null) ? null : ((initiator.length() == 0) ? null : initiator);
this.url = url;
this.refhash = (referrerhash == null) ? "" : referrerhash;
this.name = (name == null) ? "" : name;
@ -138,6 +137,7 @@ public class Request extends serverProcessorJob {
final String urlstring = entry.getColString(2, null);
if (urlstring == null) throw new IOException ("url string is null");
this.initiator = entry.getColString(1, null);
this.initiator = (initiator == null) ? null : ((initiator.length() == 0) ? null : initiator);
this.url = new yacyURL(urlstring, entry.getColString(0, null));
this.refhash = (entry.empty(3)) ? "" : entry.getColString(3, null);
this.name = (entry.empty(4)) ? "" : entry.getColString(4, "UTF-8").trim();

@ -52,16 +52,13 @@ public class Response {
public static final char DT_UNKNOWN = 'u';
// the class objects
private final int depth; // the depth of pre-fetching
private final Request request;
private final httpRequestHeader requestHeader;
private final httpResponseHeader responseHeader;
private final String responseStatus;
private byte[] cacheArray; //
private final yacyURL url;
private final String name; // the name of the link, read as anchor from an <a>-tag
private final CrawlProfile.entry profile;
private final String initiator;
private httpRequestHeader requestHeader;
private httpResponseHeader responseHeader;
private byte[] content; //
// doctype calculation
public static char docType(final yacyURL url) {
final String path = url.getPath().toLowerCase();
@ -134,58 +131,41 @@ public class Response {
}
public Response(
final int depth,
final yacyURL url,
final String name,
final String responseStatus,
Request request,
final httpRequestHeader requestHeader,
final httpResponseHeader responseHeader,
final String initiator,
final String responseStatus,
final CrawlProfile.entry profile) {
if (responseHeader == null) {
System.out.println("Response header information is null. " + url);
System.exit(0);
}
assert responseHeader != null;
this.request = request;
this.requestHeader = requestHeader;
this.responseHeader = responseHeader;
this.url = url;
this.name = name;
// assigned:
this.depth = depth;
this.responseStatus = responseStatus;
this.profile = profile;
// the initiator is the hash of the peer that caused the hash entry
// it is stored here only to track processed in the peer and this
// information is not permanently stored in the web index after the queue has
// been processed
// in case of proxy usage, the initiator hash is null,
// which distinguishes local crawling from proxy indexing
this.initiator = (initiator == null) ? null : ((initiator.length() == 0) ? null : initiator);
// to be defined later:
this.cacheArray = null;
this.content = null;
}
public String name() {
// the anchor name; can be either the text inside the anchor tag or the
// page description after loading of the page
return this.name;
return this.request.name();
}
public yacyURL url() {
return this.url;
return this.request.url();
}
public char docType() {
char doctype = docType(getMimeType());
if (doctype == DT_UNKNOWN) doctype = docType(url);
if (doctype == DT_UNKNOWN) doctype = docType(url());
return doctype;
}
public String urlHash() {
return this.url.hash();
return this.url().hash();
}
public Date lastModified() {
@ -211,7 +191,7 @@ public class Response {
}
public String initiator() {
return this.initiator;
return this.request.initiator();
}
public boolean proxy() {
@ -219,7 +199,7 @@ public class Response {
}
public long size() {
if (this.cacheArray != null) return this.cacheArray.length;
if (this.content != null) return this.content.length;
if (this.responseHeader != null) {
// take the size from the response header
return this.responseHeader.getContentLength();
@ -229,15 +209,15 @@ public class Response {
}
public int depth() {
return this.depth;
return this.request.depth();
}
public void setCacheArray(final byte[] data) {
this.cacheArray = data;
public void setContent(final byte[] data) {
this.content = data;
}
public byte[] cacheArray() {
return this.cacheArray;
public byte[] getContent() {
return this.content;
}
// the following three methods for cache read/write granting shall be as loose
@ -268,10 +248,10 @@ public class Response {
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable
// in caches
if (this.url.isPOST() && !this.profile.crawlingQ()) {
if (this.url().isPOST() && !this.profile.crawlingQ()) {
return "dynamic_post";
}
if (this.url.isCGI()) {
if (this.url().isCGI()) {
return "dynamic_cgi";
}
@ -349,10 +329,10 @@ public class Response {
// -CGI access in request
// CGI access makes the page very individual, and therefore not usable
// in caches
if (this.url.isPOST()) {
if (this.url().isPOST()) {
return false;
}
if (this.url.isCGI()) {
if (this.url().isCGI()) {
return false;
}

@ -72,6 +72,7 @@ import java.util.logging.Logger;
import java.util.zip.GZIPOutputStream;
import de.anomic.crawler.retrieval.HTTPLoader;
import de.anomic.crawler.retrieval.Request;
import de.anomic.crawler.retrieval.Response;
import de.anomic.data.Blacklist;
import de.anomic.document.Parser;
@ -347,9 +348,7 @@ public final class httpdProxyHandler {
// handle outgoing cookies
handleOutgoingCookies(requestHeader, host, ip);
prepareRequestHeader(conProp, requestHeader, hostlow);
httpResponseHeader cachedResponseHeader = plasmaHTCache.loadResponseHeader(url);
// why are files unzipped upon arrival? why not zip all files in cache?
@ -378,16 +377,23 @@ public final class httpdProxyHandler {
if (theLogger.isFinest()) theLogger.logFinest(reqID + " page not in cache: fulfill request from web");
fulfillRequestFromWeb(conProp, url, requestHeader, cachedResponseHeader, countedRespond);
} else {
final Request request = new Request(
null,
url,
requestHeader.referer(),
"",
new Date(),
new Date(),
sb.crawler.defaultProxyProfile.handle(),
0,
0,
0);
final Response cacheEntry = new Response(
0, // crawling depth
url, // url
"", // name of the url is unknown
//requestHeader, // request headers
"200 OK", // request status
request,
requestHeader,
cachedResponseHeader,
null, // initiator
sb.crawler.defaultProxyProfile // profile
"200 OK",
sb.crawler.defaultProxyProfile
);
plasmaHTCache.storeMetadata(cachedResponseHeader, cacheEntry); // TODO: check if this storeMetadata is necessary
@ -492,14 +498,22 @@ public final class httpdProxyHandler {
}
// reserver cache entry
final Request request = new Request(
null,
url,
requestHeader.referer(),
"",
new Date(),
new Date(),
sb.crawler.defaultProxyProfile.handle(),
0,
0,
0);
final Response cacheEntry = new Response(
0,
url,
"",
res.getStatusLine(),
request,
requestHeader,
responseHeader,
null,
res.getStatusLine(),
sb.crawler.defaultProxyProfile
);
plasmaHTCache.storeMetadata(responseHeader, cacheEntry);
@ -560,7 +574,7 @@ public final class httpdProxyHandler {
if (sizeBeforeDelete == -1) {
// totally fresh file
//cacheEntry.status = plasmaHTCache.CACHE_FILL; // it's an insert
cacheEntry.setCacheArray(cacheArray);
cacheEntry.setContent(cacheArray);
sb.htEntryStoreProcess(cacheEntry);
conProp.setProperty(httpHeader.CONNECTION_PROP_PROXY_RESPOND_CODE,"TCP_MISS");
} else if (cacheArray != null && sizeBeforeDelete == cacheArray.length) {
@ -572,7 +586,7 @@ public final class httpdProxyHandler {
} else {
// before we came here we deleted a cache entry
//cacheEntry.status = plasmaHTCache.CACHE_STALE_RELOAD_GOOD;
cacheEntry.setCacheArray(cacheArray);
cacheEntry.setContent(cacheArray);
sb.htEntryStoreProcess(cacheEntry);
conProp.setProperty(httpHeader.CONNECTION_PROP_PROXY_RESPOND_CODE,"TCP_REFRESH_MISS");
}

@ -30,6 +30,8 @@ import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import de.anomic.kelondro.util.FileUtils;
/**
* The kelondroBufferedEcoFS extends the IO reduction to EcoFS by providing a
@ -61,7 +63,7 @@ public class BufferedEcoFS {
}
public synchronized long size() throws IOException {
return efs == null ? 0 : efs.size();
return efs == null ? 0 : efs.size(); // stuck
}
public File filename() {
@ -117,7 +119,7 @@ public class BufferedEcoFS {
assert b.length - start >= efs.recordsize;
final byte[] bb = buffer.remove(Long.valueOf(size() - 1));
if (bb == null) {
efs.cleanLast(b, start);
efs.cleanLast(b, start); // stuck
} else {
System.arraycopy(bb, 0, b, start, efs.recordsize);
efs.cleanLast();
@ -133,4 +135,54 @@ public class BufferedEcoFS {
efs.deleteOnExit();
}
/**
* main - writes some data and checks the table's size (with time measuring)
* @param args
*/
public static void main(final String[] args) {
// open a file, add one entry and exit
final File f = new File(args[0]);
if (f.exists()) FileUtils.deletedelete(f);
try {
final EcoFS t = new EcoFS(f, 8);
final byte[] b = new byte[8];
t.add("01234567".getBytes(), 0);
t.add("ABCDEFGH".getBytes(), 0);
t.add("abcdefgh".getBytes(), 0);
t.add("--------".getBytes(), 0);
t.add("********".getBytes(), 0);
for (int i = 0; i < 1000; i++) t.add("++++++++".getBytes(), 0);
t.add("=======0".getBytes(), 0);
t.add("=======1".getBytes(), 0);
t.add("=======2".getBytes(), 0);
t.cleanLast(b, 0);
System.out.println(new String(b));
t.cleanLast(b, 0);
//t.clean(2, b, 0);
System.out.println(new String(b));
t.get(1, b, 0);
System.out.println(new String(b));
t.put(1, "AbCdEfGh".getBytes(), 0);
t.get(1, b, 0);
System.out.println(new String(b));
t.get(3, b, 0);
System.out.println(new String(b));
t.get(4, b, 0);
System.out.println(new String(b));
System.out.println("size = " + t.size());
//t.clean(t.size() - 2);
t.cleanLast();
final long start = System.currentTimeMillis();
long c = 0;
for (int i = 0; i < 100000; i++) {
c = t.size();
}
System.out.println("size() needs " + ((System.currentTimeMillis() - start) / 100) + " nanoseconds");
System.out.println("size = " + c);
t.close();
} catch (final IOException e) {
e.printStackTrace();
}
}
}

@ -566,6 +566,10 @@ public class EcoFS {
assert this.buffercount == 0;
this.raf.setLength((s - 1) * this.recordsize);
}
public void deleteOnExit() {
this.tablefile.deleteOnExit();
}
/**
* main - writes some data and checks the table's size (with time measuring)
@ -589,6 +593,7 @@ public class EcoFS {
t.add("=======2".getBytes(), 0);
t.cleanLast(b, 0);
System.out.println(new String(b));
t.cleanLast(b, 0);
//t.clean(2, b, 0);
System.out.println(new String(b));
t.get(1, b, 0);
@ -616,9 +621,5 @@ public class EcoFS {
e.printStackTrace();
}
}
public void deleteOnExit() {
this.tablefile.deleteOnExit();
}
}

@ -417,7 +417,7 @@ public class Table implements ObjectIndex {
}
public synchronized void put(final Entry row) throws IOException {
assert file == null || file.size() == index.size() + fail : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert file == null || file.size() == index.size() + fail : "file.size() = " + file.size() + ", index.size() = " + index.size() + ", fail = " + fail;
assert table == null || table.size() == index.size();
assert row != null;
assert row.bytes() != null;
@ -555,7 +555,10 @@ public class Table implements ObjectIndex {
assert file.size() == index.size() + fail : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert ((table == null) || (table.size() == index.size()));
final byte[] le = new byte[rowdef.objectsize];
long fsb = file.size();
assert fsb != 0 : "file.size() = " + fsb;
file.cleanLast(le, 0);
assert file.size() < fsb : "file.size() = " + file.size();
final Row.Entry lr = rowdef.newEntry(le);
final int i = (int) index.remove(lr.getPrimaryKeyBytes());
assert i >= 0;

@ -76,7 +76,8 @@ public class ReferenceContainerOrder<ReferenceType extends Reference> extends Ab
return this.embeddedOrder.cardinal(key);
}
public boolean equals(final Order<ReferenceContainer<ReferenceType>> otherOrder) {
@SuppressWarnings("unchecked")
public boolean equals(final Order<ReferenceContainer<ReferenceType>> otherOrder) {
if (!(otherOrder instanceof ReferenceContainerOrder)) return false;
return this.embeddedOrder.equals(((ReferenceContainerOrder<ReferenceType>) otherOrder).embeddedOrder);
}

@ -1147,11 +1147,11 @@ public final class plasmaSwitchboard extends serverAbstractSwitch<IndexingStack.
*/
// work off unwritten files
if (entry.cacheArray() != null) {
if (entry.getContent() != null) {
final String error = (entry.initiator() == null) ? entry.shallStoreCacheForProxy() : null;
if (error == null) {
plasmaHTCache.storeFile(entry.url(), entry.cacheArray());
if (this.log.isFine()) this.log.logFine("WROTE FILE (" + entry.cacheArray().length + " bytes) for " + entry.url());
plasmaHTCache.storeFile(entry.url(), entry.getContent());
if (this.log.isFine()) this.log.logFine("WROTE FILE (" + entry.getContent().length + " bytes) for " + entry.url());
} else {
if (this.log.isWarning()) this.log.logWarning("WRITE OF FILE " + entry.url() + " FORBIDDEN: " + error);
}

@ -362,7 +362,7 @@ public class SnippetCache {
sb.htEntryStoreProcess(entry);
// read resource body (if it is there)
final byte []resourceArray = entry.cacheArray();
final byte []resourceArray = entry.getContent();
if (resourceArray != null) {
resContent = new ByteArrayInputStream(resourceArray);
resContentLength = resourceArray.length;
@ -472,7 +472,7 @@ public class SnippetCache {
if (entry != null) {
// read resource body (if it is there)
final byte[] resourceArray = entry.cacheArray();
final byte[] resourceArray = entry.getContent();
if (resourceArray != null) {
resContent = new ByteArrayInputStream(resourceArray);
resContentLength = resourceArray.length;
@ -909,7 +909,7 @@ public class SnippetCache {
if (entry == null) return null; // not found in web
// read resource body (if it is there)
final byte[] resourceArray = entry.cacheArray();
final byte[] resourceArray = entry.getContent();
// in case that the resource was not in ram, read it from disk
if (resourceArray == null) {

@ -87,8 +87,8 @@ public class ymageOSM {
Log.logWarning("yamyOSM", "cannot load: " + e.getMessage());
return null;
}
if ((entry == null) || (entry.cacheArray() == null)) return null;
tileStream = new ByteArrayInputStream(entry.cacheArray());
if ((entry == null) || (entry.getContent() == null)) return null;
tileStream = new ByteArrayInputStream(entry.getContent());
}
try {
return ImageIO.read(tileStream);
