removes some warnings and unused objects

pull/389/head
sgaebel 4 years ago
parent 4a495df63a
commit fc03c4b4fe

@@ -620,6 +620,7 @@ public final class CrawlStacker implements WorkflowTask<Request>{
// returns true if the url can be accepted according to network.unit.domain
if (urlhash == null) return "url is null";
// check if this is a local address and we are allowed to index local pages:
+@SuppressWarnings("deprecation")
final boolean local = DigestURL.isLocal(urlhash);
if (this.acceptLocalURLs && local) return null;
if (this.acceptGlobalURLs && !local) return null;

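The CrawlStacker hunk silences the deprecation warning at the narrowest possible scope: the annotation sits on the single local-variable declaration that calls the deprecated DigestURL.isLocal(urlhash), so other deprecated usages elsewhere still get reported. A minimal sketch of the same pattern; LegacyNet and its isLocal method are hypothetical stand-ins for the deprecated API.

class LegacyNet {
    /** Stand-in for a deprecated API such as DigestURL.isLocal(byte[]). */
    @Deprecated
    static boolean isLocal(final String host) {
        return host != null && host.startsWith("127.");
    }
}

public class ScopedSuppressionExample {

    static String classify(final String host) {
        // Suppress the warning only for this one declaration, not the whole
        // method or class, so other deprecated calls remain visible to javac/IDE.
        @SuppressWarnings("deprecation")
        final boolean local = LegacyNet.isLocal(host);
        return local ? "local" : "global";
    }

    public static void main(final String[] args) {
        System.out.println(classify("127.0.0.1"));        // local
        System.out.println(classify("www.example.org"));  // global
    }
}
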
@@ -127,12 +127,10 @@ public class OAIListFriendsLoader implements Serializable {
private SAXParser saxParser;
private final InputStream stream;
private Attributes atts;
-private int recordCounter;
private final TreeMap<String, String> map;
public Parser(final byte[] b) {
this.map = new TreeMap<String, String>();
-this.recordCounter = 0;
this.buffer = new StringBuilder();
this.parsingValue = false;
this.atts = null;
@@ -164,14 +162,9 @@ public class OAIListFriendsLoader implements Serializable {
</BaseURLs>
*/
-public int getCounter() {
-return this.recordCounter;
-}
@Override
public void startElement(final String uri, final String name, final String tag, final Attributes atts) throws SAXException {
if ("baseURL".equals(tag)) {
-this.recordCounter++;
this.parsingValue = true;
this.atts = atts;
}

@@ -32,6 +32,12 @@ import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
+import org.apache.poi.hwpf.HWPFDocumentCore;
+import org.apache.poi.hwpf.OldWordFileFormatException;
+import org.apache.poi.hwpf.extractor.Word6Extractor;
+import org.apache.poi.hwpf.extractor.WordExtractor;
+import org.apache.poi.poifs.filesystem.POIFSFileSystem;
import net.yacy.cora.document.id.DigestURL;
import net.yacy.cora.util.CommonPattern;
import net.yacy.document.AbstractParser;
@@ -39,12 +45,6 @@ import net.yacy.document.Document;
import net.yacy.document.Parser;
import net.yacy.document.VocabularyScraper;
-import org.apache.poi.hwpf.HWPFDocument;
-import org.apache.poi.hwpf.OldWordFileFormatException;
-import org.apache.poi.hwpf.extractor.Word6Extractor;
-import org.apache.poi.hwpf.extractor.WordExtractor;
-import org.apache.poi.poifs.filesystem.POIFSFileSystem;
public class docParser extends AbstractParser implements Parser {
public docParser() {
@@ -74,7 +74,7 @@ public class docParser extends AbstractParser implements Parser {
final WordExtractor extractor;
POIFSFileSystem poifs = null;
try {
-poifs = HWPFDocument.verifyAndBuildPOIFS(source); // to be able to delegate to parseOldWordDoc w/o source.ioException
+poifs = HWPFDocumentCore.verifyAndBuildPOIFS(source); // to be able to delegate to parseOldWordDoc w/o source.ioException
extractor = new WordExtractor(poifs);
} catch (final OldWordFileFormatException isOldWordDoc) {
// if old version (Word6/Word95) delegate to old parser (as long as available in poi package)

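The docParser change calls verifyAndBuildPOIFS on HWPFDocumentCore, the class that actually declares it, instead of going through the HWPFDocument subclass, and regroups the POI imports accordingly. The control flow stays the same: the POIFSFileSystem is built up front, so when OldWordFileFormatException signals a Word6/Word95 file the already-buffered container can be handed to the legacy parser rather than the consumed InputStream. A reduced sketch of that flow, assuming the POI signatures used in the hunk:

import java.io.IOException;
import java.io.InputStream;

import org.apache.poi.hwpf.HWPFDocumentCore;
import org.apache.poi.hwpf.OldWordFileFormatException;
import org.apache.poi.hwpf.extractor.WordExtractor;
import org.apache.poi.poifs.filesystem.POIFSFileSystem;

public class DocTextSketch {

    static String extract(final InputStream source) throws IOException {
        // Building the POIFS container is what consumes the InputStream.
        final POIFSFileSystem poifs = HWPFDocumentCore.verifyAndBuildPOIFS(source);
        try {
            return new WordExtractor(poifs).getText();   // Word 97+ binary .doc
        } catch (final OldWordFileFormatException isOldWordDoc) {
            // Word6/Word95 file: the stream is gone, but the buffered poifs can be
            // passed on to the legacy parser (parseOldWordDoc in YaCy's docParser).
            return "";
        }
    }
}
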
@@ -1488,12 +1488,12 @@ public final class Protocol {
this.docs = docs;
}
-/**
-* Use this to stop writing operation. This thread will not stop immediately as Solr might be writing something.
-*/
-public void stopWriting() {
-this.stop.set(true);
-}
+// /**
+// * Use this to stop writing operation. This thread will not stop immediately as Solr might be writing something.
+// */
+// public void stopWriting() {
+// this.stop.set(true);
+// }
@Override
public void run() {

@@ -1976,6 +1976,9 @@ public final class SearchEvent implements ScoreMapUpdatesListener {
break;
case APP:
r += rentry.lapp() << this.query.ranking.coeff_cathasapp;
break;
+default:
+break;
}
// apply citation count

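The SearchEvent hunk adds an explicit empty default branch, the usual way to address the compiler/IDE warning that a switch over an enum does not cover every constant. A small self-contained illustration; the enum and weights are hypothetical, not the actual YaCy ranking coefficients.

public class SwitchDefaultExample {

    enum RankingFeature { CATHASIMAGE, CATHASAUDIO, CATHASAPP }

    static long weight(final RankingFeature feature, final long base) {
        long r = 0;
        switch (feature) {
            case CATHASIMAGE:
                r += base << 2;
                break;
            case CATHASAPP:
                r += base << 1;
                break;
            default:
                // deliberately empty: constants without a bonus contribute nothing,
                // and the explicit branch silences the "incomplete switch" warning
                break;
        }
        return r;
    }

    public static void main(final String[] args) {
        System.out.println(weight(RankingFeature.CATHASAPP, 3));   // 6
        System.out.println(weight(RankingFeature.CATHASAUDIO, 3)); // 0
    }
}
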
@@ -27,6 +27,9 @@
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
package net.yacy.utils.translation;
+import static javax.xml.stream.XMLStreamConstants.END_ELEMENT;
+import static javax.xml.stream.XMLStreamConstants.START_ELEMENT;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
@@ -37,10 +40,10 @@ import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;
-import javax.xml.stream.events.XMLEvent;
import net.yacy.cora.util.ConcurrentLog;
import net.yacy.data.Translator;
@@ -97,7 +100,7 @@ public class TranslatorXliff extends Translator {
while (xmlreader.hasNext()) {
int eventtype = xmlreader.next();
-if (eventtype == XMLEvent.START_ELEMENT) {
+if (eventtype == START_ELEMENT) {
String ename = xmlreader.getLocalName();
// setup for 'file' section (get or add translationlist for this file)
@@ -120,7 +123,7 @@ public class TranslatorXliff extends Translator {
state = xmlreader.getAttributeValue(null, "state");
target = xmlreader.getElementText(); // TODO: in full blown xliff, target may contain sub-xml elements (but we use only text)
}
-} else if (eventtype == XMLEvent.END_ELEMENT) {
+} else if (eventtype == END_ELEMENT) {
String ename = xmlreader.getLocalName();
// store source/target on finish of trans-unit

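The TranslatorXliff change imports START_ELEMENT and END_ELEMENT statically from XMLStreamConstants rather than reading them through javax.xml.stream.events.XMLEvent, which merely inherits those constants from that interface. A self-contained sketch of the same cursor-style StAX loop:

import static javax.xml.stream.XMLStreamConstants.END_ELEMENT;
import static javax.xml.stream.XMLStreamConstants.START_ELEMENT;

import java.io.StringReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

public class StaxConstantsExample {

    public static void main(final String[] args) throws XMLStreamException {
        final String xml = "<file><trans-unit><source>Hello</source></trans-unit></file>";
        final XMLStreamReader xmlreader =
                XMLInputFactory.newInstance().createXMLStreamReader(new StringReader(xml));
        while (xmlreader.hasNext()) {
            final int eventtype = xmlreader.next();
            if (eventtype == START_ELEMENT) {        // was XMLEvent.START_ELEMENT
                System.out.println("start: " + xmlreader.getLocalName());
            } else if (eventtype == END_ELEMENT) {   // was XMLEvent.END_ELEMENT
                System.out.println("end:   " + xmlreader.getLocalName());
            }
        }
        xmlreader.close();
    }
}
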
@@ -1,8 +1,8 @@
package net.yacy.cora.document.feed;
-import java.io.IOException;
+import static org.junit.Assert.assertNotNull;
import org.junit.Test;
-import static org.junit.Assert.*;
public class RSSFeedTest {
@@ -10,7 +10,7 @@ public class RSSFeedTest {
* Test of getChannel method, of class RSSFeed.
*/
@Test
-public void testGetChannel() throws IOException {
+public void testGetChannel() {
RSSFeed feed = new RSSFeed(Integer.MAX_VALUE);
// channel is required in RSS 2.0 and accessed in code w/o != null checks

@@ -62,7 +62,7 @@ public class DigestURLHashPerfTest {
System.out.println(urls.size() + " URLs loaded from " + inFile.getAbsolutePath());
-try (OutputStream outStream = args.length >= 2 ? new FileOutputStream(args[1]) : new NullOutputStream();
+try (OutputStream outStream = args.length >= 2 ? new FileOutputStream(args[1]) : NullOutputStream.NULL_OUTPUT_STREAM;
OutputStreamWriter writer = new OutputStreamWriter(outStream, StandardCharsets.UTF_8.name());
BufferedWriter out = new BufferedWriter(writer);) {

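The perf-test change replaces the deprecated NullOutputStream constructor with the shared singleton constant. Assuming this is Apache Commons IO's org.apache.commons.io.output.NullOutputStream (which defines NULL_OUTPUT_STREAM), the pattern looks like the sketch below; on Java 11+ the JDK's OutputStream.nullOutputStream() is an alternative discard sink.

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.output.NullOutputStream;

public class NullSinkExample {

    public static void main(final String[] args) throws IOException {
        // Reuse the stateless singleton instead of allocating a new NullOutputStream;
        // the no-arg constructor is deprecated in recent Commons IO releases.
        final OutputStream sink = NullOutputStream.NULL_OUTPUT_STREAM;
        try (OutputStreamWriter writer = new OutputStreamWriter(sink, StandardCharsets.UTF_8.name());
             BufferedWriter out = new BufferedWriter(writer)) {
            out.write("benchmark output that nobody needs to keep");
        }
    }
}
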
@@ -1,16 +1,18 @@
package net.yacy.search.ranking;
+import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
+import org.apache.solr.common.SolrInputDocument;
+import org.junit.Test;
import net.yacy.cora.document.analysis.Classification;
import net.yacy.cora.document.id.DigestURL;
import net.yacy.cora.util.SpaceExceededException;
import net.yacy.kelondro.data.meta.URIMetadataNode;
import net.yacy.search.schema.CollectionConfiguration;
-import org.apache.solr.common.SolrInputDocument;
-import static org.junit.Assert.assertTrue;
-import org.junit.Test;
public class ReferenceOrderTest {
@@ -20,7 +22,7 @@ public class ReferenceOrderTest {
* (only used if no Solr score supplied)
*/
@Test
-public void testCardinal_URIMetadataNode() throws MalformedURLException, IOException, SpaceExceededException {
+public void testCardinal_URIMetadataNode() throws MalformedURLException, IOException {
File config = new File("defaults/solr.collection.schema");
CollectionConfiguration cc = new CollectionConfiguration(config, true);
