Cleaned up some Javadoc warnings.

pull/105/head
luccioman 8 years ago
parent 86dc198698
commit 6a4d51d8f9

@ -52,7 +52,7 @@ public class ISO8601Formatter extends AbstractFormatter implements DateFormatter
}
/**
* Parse dates as defined in {@linkplain http://www.w3.org/TR/NOTE-datetime}.
* Parse dates as defined in <a href="http://www.w3.org/TR/NOTE-datetime">http://www.w3.org/TR/NOTE-datetime</a>.
* This format (also specified in ISO8601) allows different "precisions".
* The following lower precision versions for the complete date
* "2007-12-19T10:20:30.567+0300" are allowed:<br>

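For orientation, the precision levels defined in the linked W3C NOTE-datetime run from a bare year down to a fraction of a second; applied to the complete date above they would look roughly like this (examples chosen here for illustration, not quoted from the Javadoc):

2007
2007-12
2007-12-19
2007-12-19T10:20+03:00
2007-12-19T10:20:30+03:00
2007-12-19T10:20:30.567+03:00
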
@ -100,7 +100,7 @@ import org.apache.http.util.EntityUtils;
/**
* HttpClient implementation which uses HttpComponents Client {@link http://hc.apache.org/}
* HttpClient implementation which uses <a href="http://hc.apache.org/">HttpComponents Client</a>.
*
* @author sixcooler
*

@ -65,7 +65,7 @@ public class ListAccumulator {
/**
* Adds a new entry to a list identified by a given name.
* @param key The name of the list the entry is to be added to.
* @param list The name of the list the entry is to be added to.
* @param entry The new entry.
* @return True if the entry has been added, else false (if the list does not exist).
*/
@ -125,16 +125,14 @@ public class ListAccumulator {
}
/**
* Returns a {@link List} which contains all the {@link List Lists} of entries.
* @return list of lists.
* @return a {@link List} which contains all the {@link List Lists} of entries.
*/
public List<List<String>> getEntryLists() {
return entries;
}
/**
* Returns a {@link List} which contains all the {@link Map Maps} of entries.
* @return
* @return a {@link List} which contains all the {@link Map Maps} of entries.
*/
public List<Map<String,String>> getPropertyMaps() {
return properties;

@ -170,7 +170,7 @@ public class XMLBlacklistImporter extends DefaultHandler {
* Writes characters to a String which might be used by endElement() later.
* @param ch The characters.
* @param start The start position in the character array.
* @param lengthThe number of characters to use from the character array.
* @param length The number of characters to use from the character array.
* @throws org.xml.sax.SAXException
*/
@Override

@ -45,21 +45,21 @@ public class YMarkUtil {
public final static Pattern FOLDERS_SEPARATOR_PATTERN = Pattern.compile(FOLDERS_SEPARATOR);
/**
* conveniance function to generate url hashes for YMark bookmarks
* convenience function to generate url hashes for YMark bookmarks
* @param url a string representation of a valid url
* @return a byte[] hash for the input URL string
* @throws MalformedURLException
* @see net.yacy.kelondro.data.meta.DigestURI.DigestURI(String url, byte[] hash).hash()
* @see {@link DigestURL#hash()}
*/
public final static byte[] getBookmarkId(String url) throws MalformedURLException {
return (new DigestURL(url)).hash();
}
/**
* conveniance function to generate word hashes for YMark tags and folders
* convenience function to generate word hashes for YMark tags and folders
* @param key a tag or folder name
* @return a byte[] hash for the input string
* @see net.yacy.kelondro.data.word.Word.word2hash(final String word)
* @see net.yacy.kelondro.data.word.Word#word2hash(String)
*/
public final static byte[] getKeyId(final String key) {
return Word.word2hash(key.toLowerCase());

@ -76,8 +76,8 @@ public abstract class AbstractParser implements Parser {
/**
* check equivalence of parsers; this simply tests equality of parser names
* @param o
* @return
* @param o the object to check. Must be a {@link Parser} implementation.
* @return true when this parser is equivalent to o
*/
@Override
public boolean equals(final Object o) {

@ -765,7 +765,7 @@ dc_rights
* This is similar to mergeDocuments but directly joins internal content variables,
* uses fewer parsed details and keeps this document's crawl data (like crawldepth, lastmodified)
*
* @see mergeDocuments()
* @see #mergeDocuments(DigestURL, String, Document[])
* @param docs to be included
* @throws IOException
*/

@ -41,7 +41,7 @@ public class LargeNumberCache {
* Returns an Integer instance representing the specified int value.
* If a new Integer instance is not required, this method
* should generally be used in preference to the constructor
* {@link #Integer(int)}, as this method is likely to yield
* {@link Integer#Integer(int)}, as this method is likely to yield
* significantly better space and time performance by caching
* frequently requested values.
*

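The caching idea described here follows the Integer.valueOf pattern. A minimal sketch, assuming an arbitrary cache bound (the bound and names below are illustrative, not the actual LargeNumberCache code):

private static final int CACHE_LIMIT = 4096;                   // assumed bound, for illustration only
private static final Integer[] CACHE = new Integer[CACHE_LIMIT];

public static Integer valueOfInstance(final int i) {
    if (i < 0 || i >= CACHE_LIMIT) return Integer.valueOf(i);  // outside the cached range: no reuse
    Integer boxed = CACHE[i];
    if (boxed == null) {
        boxed = Integer.valueOf(i);
        CACHE[i] = boxed;                                       // keep the instance for later requests
    }
    return boxed;
}
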
@ -99,12 +99,11 @@ public class LibraryProvider {
}
/**
* initialize the LibraryProvider as static class. This assigns default paths, and initializes the
* Initialize the LibraryProvider as static class. This assigns default paths, and initializes the
* dictionary classes. Additionally, if default dictionaries are given in the source path, they are
* translated into the input format inside the DATA/DICTIONARIES directory
*
* @param pathToSource
* @param pathToDICTIONARIES
* @param rootPath parent path of dictionaries
*/
public static void initialize(final File rootPath) {
dictSource = new File(rootPath, path_to_source_dictionaries);

@ -74,8 +74,7 @@ public interface Parser {
/**
* check equivalence of parsers; this simply tests equality of parser names
* @param o
* @return
* @return true when this parser is equivalent to o
*/
@Override
public boolean equals(Object o);

@ -433,7 +433,7 @@ public final class TextParser {
/**
* checks if the parser supports the given extension. It is not only checked if the parser can parse such files,
* it is also checked if the extension is not included in the extension-deny list.
* @param extention
* @param ext extension name
* @return an error if the extension is not supported, null otherwise
*/
public static String supportsExtension(final String ext) {
@ -448,9 +448,9 @@ public final class TextParser {
}
/**
* checks if the parser supports the given extension. It is not only checked if the parser can parse such files,
* checks if the parser supports the given extension or the file at the specified url. It is not only checked if the parser can parse such files,
* it is also checked if the extension is not included in the extension-deny list.
* @param extention
* @param url url to check
* @return an error if the extension is not supported, null otherwise
*/
public static String supportsExtension(final MultiProtocolURL url) {

@ -122,7 +122,7 @@ public class DCEntry extends MultiMapSolrParams {
/**
* get Identifier (url) (so far only used for surrogate processing)
* @param useRelationAsAlternative true = take relation if no identifier resolves to url
* @return
* @return this entry identifier url
*/
public DigestURL getIdentifier(boolean useRelationAsAlternative) {
// identifier may be included multiple times (with all kinds of syntax - example is from one record)

@ -30,21 +30,18 @@ public interface Importer extends Runnable {
public int count();
/**
* return the number of articles per second
* @return
* @return the number of articles per second
*/
public int speed();
/**
* return the time this import is already running
* @return
* @return the time this import is already running
*/
public long runningTime();
/**
* return the remaining seconds for the completion of all records in milliseconds
* @return
* @return the remaining seconds for the completion of all records in milliseconds
*/
public long remainingTime();

@ -123,8 +123,7 @@ public class MediawikiImporter extends Thread implements Importer {
}
/**
* return the number of articles per second
* @return
* @return the number of articles per second
*/
@Override
public int speed() {
@ -133,8 +132,7 @@ public class MediawikiImporter extends Thread implements Importer {
}
/**
* return the remaining seconds for the completion of all records in milliseconds
* @return
* @return the remaining seconds for the completion of all records in milliseconds
*/
@Override
public long remainingTime() {

@ -88,8 +88,7 @@ public class ResumptionToken extends TreeMap<String, String> {
/**
* compute a url that can be used to resume the retrieval from the OAI-PMH resource
* @param givenURL
* @return
* @return the computed url
* @throws IOException in case that no follow-up url can be generated; i.e. if the expiration date is exceeded
*/
public DigestURL resumptionURL() throws IOException {
@ -149,10 +148,10 @@ public class ResumptionToken extends TreeMap<String, String> {
}
/**
* an expiration date of a resumption token that addresses how long a cached set will
* stay in the cache of the oai-pmh server. See:
* http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#CachedResultSet
* @return
* See:
* <a href="http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#CachedResultSet">Implementation Guidelines for the Open Archives</a>
* @return an expiration date of a resumption token that addresses how long a cached set will
* stay in the cache of the oai-pmh server
*/
public Date getExpirationDate() {
final String d = get("expirationDate");
@ -174,8 +173,8 @@ public class ResumptionToken extends TreeMap<String, String> {
* In other cases, it is permissible for repositories to revise
* the estimate during a list request sequence.
* An attribute according to
* http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#completeListSize
* @return
* <a href="http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#completeListSize">Implementation Guidelines for the Open Archives</a>
* @return the completeListSize attribute
*/
public int getCompleteListSize() {
final String t = get("completeListSize");
@ -188,8 +187,8 @@ public class ResumptionToken extends TreeMap<String, String> {
* thus it is always "0" in the first incomplete list response.
* It should only be specified if it is consistently used in all responses.
* An attribute according to
* http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#completeListSize
* @return
* <a href="http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#completeListSize">Implementation Guidelines for the Open Archives</a>
* @return the cursor attribute
*/
public int getCursor() {
final String t = get("cursor");
@ -198,10 +197,9 @@ public class ResumptionToken extends TreeMap<String, String> {
}
/**
* get a token of the stateless transfer in case that no expiration date is given
* see:
* http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#StateInResumptionToken
* @return
* See:
* <a href="http://www.openarchives.org/OAI/2.0/guidelines-repository.htm#StateInResumptionToken">Implementation Guidelines for the Open Archives</a>
* @return a token of the stateless transfer in case that no expiration date is given
*/
public String getToken() {
return get("token");

@ -376,9 +376,9 @@ public class apkParser extends AbstractParser implements Parser {
* Return the string stored in StringTable format at offset strOff.
* This offset points to the 16 bit string length, which
* is followed by that number of 16 bit (Unicode) chars.
* @param arr
* @param strOff
* @return
* @param arr source byte array
* @param strOff offset position
* @return the computed string
*/
public String compXmlStringAt(byte[] arr, int strOff) {
int strLen = arr[strOff + 1] << 8 & 0xff00 | arr[strOff] & 0xff;
@ -392,10 +392,9 @@ public class apkParser extends AbstractParser implements Parser {
}
/**
* Return value of a Little Endian 32 bit word from the byte array at offset off.
* @param arr
* @param off
* @return
* @param arr source byte array
* @param off byte array offset position
* @return value of a Little Endian 32 bit word from the byte array at offset off.
*/
public int LEW(byte[] arr, int off) {
return arr[off + 3] << 24 & 0xff000000 | arr[off + 2] << 16 & 0xff0000 | arr[off + 1] << 8 & 0xff00 | arr[off] & 0xFF;

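As a rough sketch of the StringTable decoding described for compXmlStringAt above (illustrative only, not the actual apkParser code), the two bytes at strOff hold the little-endian length, followed by that many 16 bit code units:

public static String stringAt(final byte[] arr, final int strOff) {
    final int strLen = arr[strOff + 1] << 8 & 0xff00 | arr[strOff] & 0xff; // 16 bit length, little endian
    final char[] chars = new char[strLen];
    for (int i = 0; i < strLen; i++) {
        final int lo = arr[strOff + 2 + i * 2] & 0xff;
        final int hi = arr[strOff + 3 + i * 2] & 0xff;
        chars[i] = (char) (hi << 8 | lo);                                  // one 16 bit (Unicode) char
    }
    return new String(chars);
}
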
@ -141,7 +141,7 @@ public class docParser extends AbstractParser implements Parser {
* @param location
* @param mimeType
* @param poifs
* @return
* @return an array containing one Document
* @throws net.yacy.document.Parser.Failure
*/
public Document[] parseOldWordDoc(

@ -217,8 +217,9 @@ public class ContentScraper extends AbstractScraper implements Scraper {
/**
* scrape a document
* @param root the document root url
* @param maxLinks the maximum number of links to scapre
* @param classDetector a map from class names to vocabulary names to scrape content from the DOM with associated class name
* @param maxLinks the maximum number of links to scrape
* @param vocabularyScraper handles maps from class names to vocabulary names and from documents to a map from vocabularies to terms
* @param timezoneOffset local time zone offset
*/
@SuppressWarnings("unchecked")
public ContentScraper(final DigestURL root, int maxLinks, final VocabularyScraper vocabularyScraper, int timezoneOffset) {

@ -41,15 +41,15 @@ import net.yacy.cora.util.ConcurrentLog;
import net.yacy.kelondro.util.MemoryControl;
/*
/**
* This class provides methods to use a pattern analysis for html files.
* The pattern analysis is generic and can be configured using a field-name/pattern property
* configuration file.
* Such a configuration file has names of the structure
* <subject-name>_<document-element>
* &lt;subject-name&gt;_&lt;document-element&gt;
* and values are regular java expressions
* A html file is scanned for pattern matchings within a specific <document-element>
* and if such a matching can be found then the <attribute-name> is collected as
* A html file is scanned for pattern matchings within a specific &lt;document-element&gt;
* and if such a matching can be found then the &lt;attribute-name&gt; is collected as
* subject for the scanned document.
* patternProperties files must have special file names where the file name
* starts with the word "parser." and must end with ".properties"
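To illustrate the naming scheme described above, a hypothetical pattern file such as parser.politics.properties might contain entries of this shape (the subject name, document elements and patterns are made up for illustration):

# <subject-name>_<document-element> = <java regular expression>
election_title = (?i).*election.*
election_body = (?i).*parliament.*
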
@ -192,10 +192,10 @@ public class Evaluation {
}
/**
* calculate the scores for a model
* the scores is a attribute/count map which count how often a specific attribute was found
* Calculate the scores for a model.
* The scores are an attribute/count map which counts how often a specific attribute was found
* @param modelName
* @return
* @return the calculated scores
*/
public ClusteredScoreMap<String> getScores(final String modelName) {
return this.modelMap.get(modelName);

@ -41,8 +41,6 @@ public class ImageEntry implements Comparable<ImageEntry>, Comparator<ImageEntry
* an ImageEntry represents the appearance of an image in a document. It also considers that an image can be used as a button for a web link
* and stores the web link also.
* @param imageurl the link to the image
* @param linkurl the link which is called when the image is pressed on a web browser. null if the image was not used as link button
* @param anchortext the text inside the anchor body where the image link appears (including the image tag). null if the image was not used as link button
* @param alt the alt text in the alt tag
* @param width the width of the image if known, or -1 if unknown
* @param height the height of the image if known, or -1 if unknown

@ -219,7 +219,7 @@ public final class TransformerWriter extends Writer {
/**
*
* @param content
* @return
* @return content or empty array
*/
private char[] filterTag(final char[] content) {
if (this.tagStack.size() == 0) {

@ -134,7 +134,7 @@ public class htmlParser extends AbstractParser implements Parser {
* @param mimeType
* @param charSet
* @param scraper
* @return
* @return a Document instance
*/
private Document transformScraper(final DigestURL location, final String mimeType, final String charSet, final ContentScraper scraper) {
final String[] sections = new String[

@ -210,9 +210,9 @@ public class Switchboard {
/**
* convenience access to boolean values in properties
* @param key
* @param dflt
* @return
* @param key property key
* @param dflt default value
* @return the boolean value from properties or dflt when it is not defined
*/
public static boolean getBool(String key, boolean dflt) {
if (!properties.containsKey(key)) return dflt;

@ -1686,9 +1686,9 @@ public final class Switchboard extends serverSwitch {
}
/**
* in nocheck mode the isLocal property is not checked to omit DNS lookup. Can only be done in allip mode
* In nocheck mode the isLocal property is not checked to omit DNS lookup. Can only be done in allip mode
*
* @return
* @return true when in nocheck mode
*/
public boolean isIPNoCheckMode() {
return isAllIPMode() && getConfigBool(SwitchboardConstants.NETWORK_DOMAIN_NOCHECK, false);
@ -1788,7 +1788,7 @@ public final class Switchboard extends serverSwitch {
* exceeded, null is returned. If a limit is exceeded, then the name of the service that caused the
* caution is returned
*
* @return
* @return null or a service name
*/
public String onlineCaution() {
if ( System.currentTimeMillis() - this.proxyLastAccess < Integer.parseInt(getConfig(
@ -1834,12 +1834,12 @@ public final class Switchboard extends serverSwitch {
}
/**
* {@link CrawlProfiles Crawl Profiles} are saved independently from the queues themselves and therefore
* {@link CrawlProfile Crawl Profiles} are saved independently from the queues themselves and therefore
* have to be cleaned up from time to time. This method only performs the clean-up if - and only if - the
* {@link IndexingStack switchboard}, {@link LoaderDispatcher loader} and {@link plasmaCrawlNURL local
* {@link Switchboard switchboard}, {@link LoaderDispatcher loader} and {@link CrawlQueues local
* crawl} queues are all empty.
* <p>
* Then it iterates through all existing {@link CrawlProfiles crawl profiles} and removes all profiles
* Then it iterates through all existing {@link CrawlProfile crawl profiles} and removes all profiles
* which are not hard-coded.
* </p>
* <p>
@ -1847,10 +1847,6 @@ public final class Switchboard extends serverSwitch {
* will be returned</i>
* </p>
*
* @see #CRAWL_PROFILE_PROXY hardcoded
* @see #CRAWL_PROFILE_REMOTE hardcoded
* @see #CRAWL_PROFILE_SNIPPET_TEXT hardcoded
* @see #CRAWL_PROFILE_SNIPPET_MEDIA hardcoded
* @return whether this method has done something or not (i.e. because the queues have been filled or
* there are no profiles left to clean up)
* @throws <b>InterruptedException</b> if the current thread has been interrupted, i.e. by the shutdown
@ -3319,12 +3315,12 @@ public final class Switchboard extends serverSwitch {
}
/**
* load the content of a URL, parse the content and add the content to the index This process is started
* load the content of some URLs, parse the content and add the content to the index. This process is started
* concurrently. The method returns immediately after the call.
* Loaded/indexed pages are added to the given SearchEvent. If this is not required prefer addToCrawler
* to spare concurrent processes, bandwidth and intransparent crawl/load activity
*
* @param url the url that shall be indexed
* @param urls the urls that shall be indexed
* @param searchEvent (optional) a search event that shall get results from the indexed pages directly
* feeded. If object is null then it is ignored
* @throws IOException
@ -3413,11 +3409,11 @@ public final class Switchboard extends serverSwitch {
}
/**
* add url to Crawler - which itself loads the URL, parses the content and adds it to the index
* add urls to Crawler - which itself loads the URL, parses the content and adds it to the index
* transparent alternative to "addToIndex" including double-in-crawler check, display in crawl monitor
* but doesn't return results for an ongoing search
*
* @param url the url that shall be indexed
* @param urls the urls that shall be indexed
* @param asglobal true adds the url to global crawl queue (for remote crawling), false to the local crawler
*/
public void addToCrawler(final Collection<DigestURL> urls, final boolean asglobal) {

@ -26,7 +26,7 @@
package net.yacy.search;
import net.yacy.kelondro.util.MapTools;
import net.yacy.cora.order.Digest;
import net.yacy.server.http.RobotsTxtConfig;
/**
@ -39,7 +39,7 @@ public final class SwitchboardConstants {
/**
* <p><code>public static final String <strong>ADMIN_ACCOUNT_B64MD5</strong> = "adminAccountBase64MD5"</code></p>
* <p>Name of the setting holding the authentication hash for the static <code>admin</code>-account. It is calculated
* by first encoding <code>username:password</code> as Base64 and hashing it using {@link MapTools#encodeMD5Hex(String)}.</p>
* by first encoding <code>username:password</code> as Base64 and hashing it using {@link Digest#encodeMD5Hex(String)}.</p>
* With the introduction of DIGEST authentication all passwords are MD5 encoded and calculated as <code>username:adminrealm:password</code>.
* To differentiate old and new admin passwords, the newly calculated passwords use a "MD5:" prefix.
*/
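A minimal sketch of the two hash forms described above, assuming the Digest.encodeMD5Hex(String) helper referenced in this Javadoc (the Base64 encoder and variable names are illustrative, not the actual YaCy code):

// old form: MD5 hex of the Base64-encoded "username:password"
String legacyHash = Digest.encodeMD5Hex(java.util.Base64.getEncoder()
        .encodeToString((username + ":" + password).getBytes(java.nio.charset.StandardCharsets.UTF_8)));
// new form: "MD5:" prefix plus MD5 hex of "username:adminrealm:password"
String digestHash = "MD5:" + Digest.encodeMD5Hex(username + ":" + adminrealm + ":" + password);
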
@ -101,7 +101,6 @@ public final class SwitchboardConstants {
* <p>Name of the local crawler thread, popping one entry off the Local Crawl Queue, and passing it to the
* proxy cache enqueue thread to download and further process it</p>
*
* @see Switchboard#PROXY_CACHE_ENQUEUE
*/
public static final String CRAWLJOB_LOCAL_CRAWL = "50_localcrawl";
public static final String CRAWLJOB_LOCAL_CRAWL_METHOD_START = "coreCrawlJob";
@ -126,7 +125,7 @@ public final class SwitchboardConstants {
* <p><code>public static final String <strong>CRAWLJOB_REMOTE_CRAWL_LOADER</strong> = "60_remotecrawlloader"</code></p>
* <p>Name of the remote crawl list loading thread</p>
*
* @see Switchboard#CRAWLJOB_REMOTE_CRAWL_LOADER
* @see #CRAWLJOB_REMOTE_CRAWL_LOADER
*/
public static final String CRAWLJOB_REMOTE = "crawlResponse"; // enable/disable response to remote crawl requests
public static final String CRAWLJOB_REMOTE_CRAWL_LOADER = "60_remotecrawlloader";
@ -211,7 +210,7 @@ public final class SwitchboardConstants {
* <p><code>public static final String <strong>INDEX_DIST_ALLOW</strong> = "allowDistributeIndex"</code></p>
* <p>Name of the setting whether Index Distribution shall be allowed (and the DHT-thread therefore started) or not</p>
*
* @see Switchboard#INDEX_DIST_ALLOW_WHILE_CRAWLING
* @see #INDEX_DIST_ALLOW_WHILE_CRAWLING
*/
public static final String INDEX_DIST_ALLOW = "allowDistributeIndex";
public static final String INDEX_RECEIVE_ALLOW = "allowReceiveIndex";
@ -224,7 +223,7 @@ public final class SwitchboardConstants {
* the Local Crawler Queue is filled.</p>
* <p>This setting only has effect if {@link #INDEX_DIST_ALLOW} is enabled</p>
*
* @see Switchboard#INDEX_DIST_ALLOW
* @see #INDEX_DIST_ALLOW
*/
public static final String INDEX_DIST_ALLOW_WHILE_CRAWLING = "allowDistributeIndexWhileCrawling";
public static final String INDEX_DIST_ALLOW_WHILE_INDEXING = "allowDistributeIndexWhileIndexing";
@ -251,14 +250,6 @@ public final class SwitchboardConstants {
public static final String PROXY_INDEXING_LOCAL_TEXT = "proxyIndexingLocalText";
public static final String PROXY_INDEXING_LOCAL_MEDIA = "proxyIndexingLocalMedia";
public static final String PROXY_CACHE_SIZE = "proxyCacheSize";
/**
* <p><code>public static final String <strong>PROXY_CACHE_LAYOUT</strong> = "proxyCacheLayout"</code></p>
* <p>Name of the setting which file-/folder-layout the proxy cache shall use. Possible values are {@link #PROXY_CACHE_LAYOUT_TREE}
* and {@link #PROXY_CACHE_LAYOUT_HASH}</p>
*
* @see Switchboard#PROXY_CACHE_LAYOUT_TREE
* @see Switchboard#PROXY_CACHE_LAYOUT_HASH
*/
public static final String PROXY_YACY_ONLY = "proxyYacyOnly";
public static final String PROXY_TRANSPARENT_PROXY = "isTransparentProxy";
@ -360,8 +351,6 @@ public final class SwitchboardConstants {
* <p>Name of the setting specifying the folder beginning from the YaCy-installation's top-folder, where all
* downloaded webpages and their respective resources and HTTP-headers are stored. It is the location containing
* the proxy-cache</p>
*
* @see Switchboard#PROXY_CACHE_LAYOUT for details on the file-layout in this path
*/
public static final String HTCACHE_PATH = "proxyCache";
public static final String HTCACHE_PATH_DEFAULT = "DATA/HTCACHE";
@ -415,13 +404,6 @@ public final class SwitchboardConstants {
* <p>Name of the setting specifying the folder beginning from the YaCy-installation's top-folder, where all
* DBs containing "work" of the user are saved. Such include bookmarks, messages, wiki, blog</p>
*
* @see Switchboard#DBFILE_BLOG
* @see Switchboard#DBFILE_BOOKMARKS
* @see Switchboard#DBFILE_BOOKMARKS_DATES
* @see Switchboard#DBFILE_BOOKMARKS_TAGS
* @see Switchboard#DBFILE_MESSAGE
* @see Switchboard#DBFILE_WIKI
* @see Switchboard#DBFILE_WIKI_BKP
*/
public static final String WORK_PATH = "workPath";
public static final String WORK_PATH_DEFAULT = "DATA/WORK";

@ -262,8 +262,7 @@ public final class Fulltext {
}
/**
* get the size of the webgraph index
* @return
* @return the size of the webgraph index
*/
public long webgraphSize() {
return this.writeWebgraph ? this.getWebgraphConnector().getSize() : 0;
@ -390,7 +389,7 @@ public final class Fulltext {
/**
* using a fragment of the url hash (6 bytes: bytes 6 to 11) it is possible to address all urls from a specific domain.
* Here such a fragment can be used to delete all these domains at once
* @param hosthash the hash of the host to be deleted
* @param hosthashes the hashes of the hosts to be deleted
* @param freshdate either NULL or a date in the past which is the limit for deletion. Only documents older than this date are deleted
* @throws IOException
*/
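A small sketch of the fragment extraction described above (the method and variable names are illustrative; only the byte positions 6 to 11 come from the Javadoc):

static byte[] hostFragment(final byte[] urlhash) {
    final byte[] hosthash = new byte[6];
    System.arraycopy(urlhash, 6, hosthash, 0, 6); // bytes 6..11 of the url hash address the host/domain
    return hosthash;
}
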
@ -460,9 +459,8 @@ public final class Fulltext {
/**
* remove a full subpath from the index
* @param subpath the left path of the url; at least until the end of the host
* @param basepath the left path of the url; at least until the end of the host
* @param freshdate either NULL or a date in the past which is the limit for deletion. Only documents older than this date are deleted
* @param concurrently if true, then the method returnes immediately and runs concurrently
*/
public int remove(final String basepath, Date freshdate) {
DigestURL uri;
@ -510,7 +508,7 @@ public final class Fulltext {
* Deletes document with id=urlHash from fulltext index and document with
* source_id_s=urlHash from webgraph index
* @param urlHash the document id
* @return
* @return false
*/
public boolean remove(final byte[] urlHash) {
if (urlHash == null) return false;
@ -570,7 +568,7 @@ public final class Fulltext {
/**
* create a dump file from the current solr directory
* @return
* @return file reference to the dump
*/
public File dumpSolr() {
EmbeddedInstance esc = this.solrInstances.getEmbedded();

@ -126,10 +126,9 @@ public class Segment {
/**
* create a new Segment
* @param log
* @param segmentPath that should be the path ponting to the directory "SEGMENT"
* @param collectionSchema
* @throws IOException
* @param log logger instance
* @param segmentPath that should be the path pointing to the directory "SEGMENT"
* @throws IOException when an error occurs
*/
public Segment(final ConcurrentLog log, final File segmentPath, final File archivePath,
final CollectionConfiguration collectionConfiguration, final WebgraphConfiguration webgraphConfiguration) throws IOException {
@ -399,7 +398,7 @@ public class Segment {
/**
* get the load time of a resource.
* @param urlHash
* @param urlhash the resource hash
* @return the time in milliseconds since epoch for the load time or -1 if the document does not exist
*/
public long getLoadTime(final String urlhash) throws IOException {

@ -36,8 +36,7 @@ public class LanguageNavigator extends StringNavigator implements Navigator {
/**
* Default constructor, using the default YaCy Solr field language_s.
*
* @param title
* @param field the SolrDocument schema field containing language code
* @param title the navigator display name
*/
public LanguageNavigator(String title) {
super(title, CollectionSchema.language_s);

@ -80,7 +80,7 @@ public interface Navigator extends ScoreMap<String> {
* The navigator looks for a field in the document and increases the counts
* depending on the value in the document field.
*
* @param docs document
* @param doc document
*/
public void incDoc(URIMetadataNode doc);

@ -81,7 +81,6 @@ public class RestrictedStringNavigator extends StringNavigator implements Naviga
/**
* Increase counter if item allowed and not forbidden
* @param key
*/
@Override
public void inc(ScoreMap<String> map) {

@ -148,9 +148,9 @@ public class YearNavigator extends StringNavigator implements Navigator {
/**
* For date_in_content_dts it returns true if from:YEAR and to:YEAR is part
* of the modifier, otherwise false.
* @param modifier
* @param modifier the search query modifier
* @param name 4 digit year string
* @return
* @return true when the modifier is active
*/
@Override
public boolean modifieractive(QueryModifier modifier, String name) {

@ -203,7 +203,7 @@ public class QueryGoal {
* the modifiers are held separately in a search parameter modifier
*
* @param encodeHTML
* @return
* @return the search query string
*/
public String getQueryString(final boolean encodeHTML) {
if (this.query_original == null) return null;

@ -669,11 +669,7 @@ public final class QueryParams {
/**
* make a query anchor tag
* @param page
* @param theQuery
* @param originalUrlMask
* @param addToQuery
* @return
* @return the anchor url builder
*/
public static StringBuilder navurl(final RequestHeader.FileType ext, final int page, final QueryParams theQuery, final String newQueryString, boolean newModifierReplacesOld) {

@ -218,7 +218,6 @@ public class CollectionConfiguration extends SchemaConfiguration implements Seri
* @param doc
* @param allAttr
* @param digestURL
* @param doctype
* @return the normalized url
*/
public String addURIAttributes(final SolrInputDocument doc, final boolean allAttr, final DigestURL digestURL) {
@ -2104,13 +2103,13 @@ public class CollectionConfiguration extends SchemaConfiguration implements Seri
return a;
}
/**
* encode a string containing attributes from anchor rel properties binary:
* bit 0: "me" contained in rel
* bit 1: "nofollow" contained in rel
* @param rel
* @return binary encoded information about rel
*/
// /**
// * encode a string containing attributes from anchor rel properties binary:
// * bit 0: "me" contained in rel
// * bit 1: "nofollow" contained in rel
// * @param rel
// * @return binary encoded information about rel
// */
/*
private static List<Integer> relEval(final List<String> rel) {
List<Integer> il = new ArrayList<Integer>(rel.size());

@ -412,7 +412,7 @@ public class TextSnippet implements Comparable<TextSnippet>, Comparator<TextSnip
/**
* Marks all words in current line which have the same
* hash values as the ones contained in argument.
* @param queryHashes hashes of search words
* @param queryGoal the query goal
* @return line with marked words
*/
public String getLineMarked(final QueryGoal queryGoal) {
@ -503,7 +503,7 @@ public class TextSnippet implements Comparable<TextSnippet>, Comparator<TextSnip
* @param queryHashes hashes of the words to mark
* @return the marked word if one of the hashes matches,
* else the unmarked word
* @see #getLineMarked(Set)
* @see #getLineMarked(QueryGoal)
*/
private static String getWordMarked(
final String word, final Set<byte[]> queryHashes) {
