Various javadoc fixes

pull/448/head
Daleth Darko 3 years ago
parent 242745f622
commit 3ced06c731

@ -47,7 +47,7 @@ import java.util.TreeSet;
*
* @author Philipp Nolte
*
* @see http://en.wikipedia.org/wiki/Naive_Bayes_classifier
* @see <a href="https://en.wikipedia.org/wiki/Naive_Bayes_classifier">Naive Bayes classifier</a>
*
* @param <T> The feature class.
* @param <K> The category class.

@ -99,7 +99,7 @@ public class WordCache {
/**
* read the dictionary and construct a set of recommendations to a given string
* @param s input value that is used to match recommendations
* @param string input value that is used to match recommendations
* @return set that contains all words that start or end with the input value
*/
public Set<StringBuilder> recommend(StringBuilder string) {
@ -139,7 +139,7 @@ public class WordCache {
* check if the library supports the given word
* A word is supported, if the library contains a word
* that starts or ends with the given word
* @param s the given word
* @param string the given word
* @return true if the library supports the word
*/
public boolean supports(StringBuilder string) {
@ -259,7 +259,7 @@ public class WordCache {
/**
* read the dictionary and construct a set of recommendations to a given string
* @param s input value that is used to match recommendations
* @param string input value that is used to match recommendations
* @return set that contains all words that start or end with the input value
*/
public Set<StringBuilder> recommend(StringBuilder string) {
@ -302,7 +302,7 @@ public class WordCache {
* check if the library supports the given word
* A word is supported, if the library contains a word
* that starts or ends with the given word
* @param s the given word
* @param string the given word
* @return true if the library supports the word
*/
public boolean supports(StringBuilder string) {
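
The four WordCache hunks above only rename the documented parameter from s to string so it matches the real signature. A minimal usage sketch of the documented contract, relying only on the two signatures visible here (the WordCache constructor is not part of this diff, so the instance is passed in):

    // hypothetical helper; recommend(StringBuilder) and supports(StringBuilder) are the methods shown above
    static void printSuggestions(final WordCache cache, final String input) {
        final StringBuilder term = new StringBuilder(input);
        if (cache.supports(term)) { // true if some dictionary word starts or ends with the term
            for (final StringBuilder suggestion : cache.recommend(term)) {
                System.out.println(suggestion);
            }
        }
    }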

@ -82,7 +82,7 @@ public abstract class AbstractFederateSearchConnector implements FederateSearchC
* convert the remote search result to the internal result presentation
* (field mapping)
*
* @param instanceName internal name
* @param instance internal name
* @param cfgFileName e.g. DATA/SETTINGS/FEDERATECFG/instanceName.SCHEMA
* @return true if success false if not
*/
@ -156,7 +156,7 @@ public abstract class AbstractFederateSearchConnector implements FederateSearchC
* Converts a remote schema result to YaCy schema using the fieldname
* mapping provided as config file
*
* @param remote result (with remote fieldnames)
* @param doc result (with remote fieldnames)
* @return SolrDocument with field names according to the YaCy schema
*/
protected URIMetadataNode toYaCySchema(final SolrDocument doc) throws MalformedURLException {

@ -91,8 +91,9 @@ public class SRURSSConnector {
* @param query the query as string
* @param startRecord number of first record
* @param maximumRecords maximum number of records
* @param verify if true, result entries are verified using the snippet fetch (slow); if false simply the result is returned
* @param cacheStrategy
* @param global if true also search results from other peers are included
* @param agent
* @return
*/
public static RSSFeed loadSRURSS(

@ -52,7 +52,7 @@ public interface SchemaDeclaration {
/**
* @return true when this field is stored as a column-oriented field optimized for sorting, faceting and grouping
* @see https://cwiki.apache.org/confluence/display/solr/DocValues
* @see <a href="https://cwiki.apache.org/confluence/display/solr/DocValues">DocValues</a>
*/
public boolean isDocValue();

@ -333,7 +333,7 @@ public abstract class AbstractSolrConnector implements SolrConnector {
* get a document id result stream from a set of solr queries.
* The result queue is considered as terminated if AbstractSolrConnector.POISON_ID is returned.
* The method returns immediately and feeds the search results into the queue
* @param querystring a list of query strings
* @param querystrings a list of query strings
* @param sort the solr sort string, may be null to be not used
* @param offset common offset of all queries
* @param maxcount maximum count for each query
@ -610,7 +610,7 @@ public abstract class AbstractSolrConnector implements SolrConnector {
* Update a collection of solr input documents.
* This will write only a partial update for all fields given in the SolrInputDocuments
* and leaves all other fields untouched.
* @param solrdocs
* @param solrdoc
* @throws IOException
* @throws SolrException
*/

@ -143,7 +143,7 @@ public interface SolrConnector extends Iterable<String> /* Iterable of document
/**
* add a collection of solr input documents
* @param solrdocs
* @param solrdoc
* @throws IOException
* @throws SolrException
*/
@ -153,7 +153,7 @@ public interface SolrConnector extends Iterable<String> /* Iterable of document
* Update a collection of solr input documents.
* This will write only a partial update for all fields given in the SolrInputDocuments
* and leaves all other fields untouched.
* @param solrdocs
* @param solrdoc
* @throws IOException
* @throws SolrException
*/
@ -321,7 +321,7 @@ public interface SolrConnector extends Iterable<String> /* Iterable of document
* get a document id result stream from a set of solr queries.
* The result queue is considered as terminated if AbstractSolrConnector.POISON_ID is returned.
* The method returns immediately and feeds the search results into the queue
* @param querystring a list of query strings
* @param querystrings a list of query strings
* @param sort the solr sort string, may be null to be not used
* @param offset common offset of all queries
* @param maxcount maximum count for each query

@ -172,7 +172,7 @@ public abstract class SolrServerConnector extends AbstractSolrConnector implemen
/**
* delete entries from solr according to the given solr query string
* @param id the url hash of the entry
* @param querystring
* @throws IOException
*/
@Override

@ -118,7 +118,7 @@ public class YJsonResponseWriter implements QueryResponseWriter, SolrjResponseWr
* @param writer an open output writer. Must not be null.
* @param request the initial Solr request. Must not be null.
* @param values the response values. Must not be null.
* @param rsp the Solr response header.
* @param responseObj the Solr response header.
* @throws IOException when a write error occurred
*/
private void write(final Writer writer, final SolrQueryRequest request, final NamedList<?> values,
@ -375,7 +375,6 @@ public class YJsonResponseWriter implements QueryResponseWriter, SolrjResponseWr
*
* @param writer an open output writer. Must not be null.
* @param documents the documents to render. Must not be null.
* @param responseCount the number of documents to process
* @param snippets snippets Solr computed text snippets (highlighting).
* @throws IOException when an unexpected error occurred while writing
*/

@ -146,7 +146,7 @@ public class ResponseHeader extends HeaderFramework {
* @param path Path the cookie belongs to. Default - "/". Can be <b>null</b>.
* @param domain Domain this cookie belongs to. Default - domain name. Can be <b>null</b>.
* @param secure If true the cookie will be sent only over a safe connection such as https
* @see further documentation: <a href="http://docs.sun.com/source/816-6408-10/cookies.htm">docs.sun.com</a>
* @see <a href="http://docs.sun.com/source/816-6408-10/cookies.htm">further documentation at docs.sun.com</a>
*/
public void setCookie(final String name, final String value, final Integer maxage, final String path, final String domain, final boolean secure)
{
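
Only the @see link format changed in this hunk; as a hedged illustration of the documented defaults, a hypothetical call on a ResponseHeader instance named header could look like this (it relies only on the signature and the null-defaults shown above):

    // hypothetical call; values are placeholders
    header.setCookie(
            "track",                 // name of the cookie
            "1",                     // value of the cookie
            Integer.valueOf(3600),   // maxage (its javadoc line lies outside this hunk)
            null,                    // path: null falls back to the default "/"
            null,                    // domain: null falls back to the current domain name
            true);                   // secure: send only over a safe connection such as https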

@ -114,7 +114,8 @@ public class TimeoutRequest<E> {
/**
* ping a remote server using a given uri and a time-out
* @param uri
* @param host
* @param port
* @param timeout
* @return true if the server exists and replies within the given time-out
*/

@ -46,7 +46,6 @@ public abstract class AbstractScoreMap<E> implements ScoreMap<E> {
/**
* divide the map into two halves using the count of the entries
* @param score
* @return the objects of the smaller entries from at least 1/2 of the list
*/
@Override

@ -49,7 +49,6 @@ public interface ScoreMap<E> extends Iterable<E> {
/**
* divide the map into two halves using the count of the entries
* @param score
* @return the objects of the smaller entries from at least 1/2 of the list
*/
public List<E> lowerHalf();

@ -132,7 +132,8 @@ public interface Balancer {
* the necessary time until the url is released and returned as CrawlEntry object. In case that a profile
* for the computed Entry does not exist, null is returned
* @param delay true if the requester demands forced delays using explicit thread sleep
* @param profile
* @param cs
* @param robots
* @return a url in a CrawlEntry object
* @throws IOException
* @throws SpaceExceededException

@ -274,7 +274,6 @@ public class Snapshots {
* The actual deletion of files in the target directory must be done elsewhere, this method does not store the snapshot files.
* @param url
* @param depth
* @param date
*/
public Set<Date> announceDeletion(final DigestURL url, final int depth) {
HashSet<Date> dates = new HashSet<>();

@ -83,7 +83,7 @@ public class Transactions {
/**
* @param dir the parent directory of inventory and archive snapshots.
* @param wkhtmltopdfTimeout the maximum to wait for each wkhtmltopdf call when rendering PDF snapshots
* @param wkhtmltopdfSecondsTimeout the maximum to wait for each wkhtmltopdf call when rendering PDF snapshots
*/
public static void init(final File dir, final long wkhtmltopdfSecondsTimeout) {
transactionDir = dir;
@ -361,7 +361,6 @@ public class Transactions {
* @param state the wanted transaction state, State.INVENTORY, State.ARCHIVE or State.ANY
* @param url
* @param depth
* @param date
*/
public static void announceDeletion(final DigestURL url, final int depth, final State state) {
if (state == State.INVENTORY || state == State.ANY) inventory.announceDeletion(url, depth);
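
A short sketch tying the two Transactions hunks together; the snapshot directory and timeout value are made up for illustration, the DigestURL string constructor is assumed, and State is taken to be the enum referenced in announceDeletion:

    // hypothetical usage; paths and values are placeholders
    Transactions.init(new File("DATA/SNAPSHOTS"), 30L);      // wkhtmltopdfSecondsTimeout; the name suggests seconds
    final DigestURL url = new DigestURL("https://example.org/page.html"); // may throw MalformedURLException
    // announce that snapshots of this url, taken at crawl depth 0, are going to be deleted
    Transactions.announceDeletion(url, 0, Transactions.State.ANY);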

@ -29,6 +29,7 @@
package net.yacy.crawler.retrieval;
import java.io.IOException;
import java.net.URL;
import java.util.Date;
import net.yacy.cora.document.encoding.ASCII;
@ -127,7 +128,7 @@ public class Request extends WorkflowJob
*
* @param initiator the hash of the initiator peer
* @param url the {@link URL} to crawl
* @param referrer the hash of the referrer URL
* @param referrerhash the hash of the referrer URL
* @param name the name of the document to crawl
* @param appdate the time when the url was first time appeared
* @param profileHandle the name of the prefetch profile. This must not be null!

@ -82,7 +82,7 @@ public class RobotsTxt {
*
* @param worktables
* @param loader
* @param maxConcurrentTheads maximum active threads this instance is allowed to run for its concurrent tasks
* @param maxActiveTheads maximum active threads this instance is allowed to run for its concurrent tasks
*/
public RobotsTxt(final WorkTables worktables, LoaderDispatcher loader, final int maxActiveTheads) {
this.threadPool = new ThreadPoolExecutor(maxActiveTheads, maxActiveTheads,

@ -639,7 +639,7 @@ public class BookmarksDB {
/**
* set the Tags of the bookmark
* @param tagNames ArrayList with the tagnames
* @param tags2 ArrayList with the tagnames
* @param local sets, whether the updated tags should be stored to tagsDB
*/
public void setTags(final Set<String> tags2, final boolean local){

@ -196,7 +196,7 @@ public final class UserDB {
* Determine if a user has admin rights from an 'Authorisation' http header field.
* Tests both userDB and old style adminpw.
*
* @param auth http-headerline for authorisation.
* @param header http-headerline for authorisation.
* @param cookies
*/
public boolean hasAdminRight(final RequestHeader header, final Cookie[] cookies) {

@ -1051,7 +1051,7 @@ public class WikiCode extends AbstractWikiParser implements WikiParser {
* Process template inclusions in line, eventually with geo coordinate metadata
* @param line line of wiki text
* @return cleaned text with eventual geo coordinates formatted to be recognizable by parser
* @see https://en.wikipedia.org/wiki/Wikipedia:Transclusion
* @see <a href="https://en.wikipedia.org/wiki/Wikipedia:Transclusion">Wikipedia:Transclusion</a>
*/
protected static String processMetadata(final String line) {
StringBuilder processedLine = new StringBuilder(line);

@ -418,7 +418,7 @@ public final class TextParser {
* @param timezoneOffset the local time zone offset
* @param depth the current depth of the crawl
* @param contentLength the length of the source, if known (else -1 should be used)
* @param source a input stream
* @param sourceStream an input stream
* @param maxLinks the maximum total number of links to parse and add to the result documents
* @param maxBytes the maximum number of content bytes to process
* @return a list of documents that result from parsing the source, with empty or null text.
@ -443,7 +443,7 @@ public final class TextParser {
* @param timezoneOffset the local time zone offset
* @param depth the current depth of the crawl
* @param contentLength the length of the source, if known (else -1 should be used)
* @param source a input stream
* @param sourceStream an input stream
* @param maxLinks the maximum total number of links to parse and add to the result documents
* @param maxBytes the maximum number of content bytes to process
* @return a list of documents that result from parsing the source, with empty or null text.
@ -665,7 +665,7 @@ public final class TextParser {
* - the mime type computed from the extension (3.)
* finally the generic parser is added as backup if all above fail
* @param url the given url
* @param mimeType the given mime type
* @param mimeType1 the given mime type
* @return a list of Idiom parsers that may be appropriate for the given criteria
* @throws Parser.Failure when the file extension or the MIME type is denied
*/

@ -148,7 +148,7 @@ public class bzipParser extends AbstractParser implements Parser {
* @param location the parsed resource URL
* @param mimeType the media type of the resource
* @param charset the charset name if known
* @param an instance of bzipParser that is registered as the parser origin of the document
* @param parser instance of bzipParser that is registered as the parser origin of the document
* @return a Document instance
*/
public static Document createMainDocument(final DigestURL location, final String mimeType, final String charset, final bzipParser parser) {
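
The fixed @param here documents the last argument of createMainDocument; a hedged call sketch, assuming bzipParser has a public no-argument constructor (as YaCy parsers usually do) and that DigestURL can be built from a URL string:

    // hypothetical call; the URL, mime type and charset are placeholder values
    final bzipParser parser = new bzipParser();
    final DigestURL location = new DigestURL("https://example.org/archive.tar.bz2");
    final Document main = bzipParser.createMainDocument(
            location, "application/x-bzip2", "UTF-8", parser);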

@ -50,7 +50,7 @@ public interface Scraper {
/**
* Process plain text
* @param plain text to process
* @param text text to process
* @param insideTag the eventual direct parent tag. May be null.
*/
public void scrapeText(char[] text, ContentScraper.Tag insideTag);

@ -256,7 +256,7 @@ public class tarParser extends AbstractParser implements Parser {
* the media type of the resource
* @param charset
* the charset name if known
* @param an
* @param parser
* instance of tarParser that is registered as the parser origin of
* the document
* @return a Document instance

@ -1143,7 +1143,7 @@ public class YaCyDefaultServlet extends HttpServlet {
* Write input stream content to response and close input stream.
* @param response servlet response. Must not be null.
* @param targetExt response file format
* @param tmp
* @param inStream
* @throws IOException when a read/write error occurred.
*/
private void writeInputStream(HttpServletResponse response, String targetExt, InputStream inStream)

@ -238,7 +238,7 @@ public class BEncodedHeap implements MapStore {
/**
* check if a row with given key exists in the table
*
* @param name
* @param pk
* @return true if the row exists
*/
private boolean containsKey(final byte[] pk) {
@ -248,7 +248,7 @@ public class BEncodedHeap implements MapStore {
/**
* check if a row with given key exists in the table This method is here to implement the Map interface
*
* @param name
* @param key
* @return true if the row exists
*/
@Override
@ -271,7 +271,7 @@ public class BEncodedHeap implements MapStore {
/**
* get a map from the table
*
* @param name
* @param pk
* @return the map if one found or NULL if no entry exists or the entry is corrupt
* @throws SpaceExceededException
* @throws IOException
@ -287,7 +287,7 @@ public class BEncodedHeap implements MapStore {
/**
* get a map from the table this method is here to implement the Map interface
*
* @param name
* @param key
* @return the map if one found or NULL if no entry exists or the entry is corrupt
*/
@Override
@ -383,7 +383,7 @@ public class BEncodedHeap implements MapStore {
* insert a map into the table this method shall be used in exchange of the get method if the previous
* entry value is not needed.
*
* @param name
* @param pk
* @param map
* @throws SpaceExceededException
* @throws IOException
@ -431,7 +431,7 @@ public class BEncodedHeap implements MapStore {
/**
* insert a map into the table
*
* @param name
* @param pk
* @param map
*/
@Override
@ -454,7 +454,7 @@ public class BEncodedHeap implements MapStore {
/**
* delete a map from the table
*
* @param name
* @param pk
* @throws IOException
*/
public void delete(final byte[] pk) throws IOException {
@ -464,7 +464,7 @@ public class BEncodedHeap implements MapStore {
/**
* delete a map from the table
*
* @param name
* @param key
* @throws SpaceExceededException
* @throws IOException
*/
@ -493,7 +493,7 @@ public class BEncodedHeap implements MapStore {
/**
* Copy all the mappings from the specified map to this map.
*
* @param m mappings to be stored in this map
* @param map mappings to be stored in this map
*/
@Override
public void putAll(final Map<? extends byte[], ? extends Map<String, byte[]>> map) {
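
The BEncodedHeap hunks only rename parameters to pk, key and map; a hedged sketch of the Map-style access they describe, assuming an already opened BEncodedHeap instance named heap (needs java.util.HashMap/Map and java.nio.charset.StandardCharsets):

    // hypothetical usage built only on putAll(Map) and delete(byte[]) shown above
    final byte[] pk = "0123456789AB".getBytes(StandardCharsets.UTF_8);   // example primary key
    final Map<String, byte[]> row = new HashMap<>();
    row.put("title", "example".getBytes(StandardCharsets.UTF_8));
    final Map<byte[], Map<String, byte[]>> batch = new HashMap<>();
    batch.put(pk, row);
    heap.putAll(batch);   // copy all mappings into the table
    heap.delete(pk);      // remove the row again; declared to throw IOException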

@ -110,7 +110,7 @@ public class Stack {
* push a new element on the top of the stack using an entry object
* this is only useful for internal processes where a special handle
* is created
* @param b the new stack element
* @param e the new stack element
* @return the handle used to store the new element
* @throws IOException
* @throws SpaceExceededException

@ -43,7 +43,8 @@ public class Stacks {
* Stacks can be created on-the-fly in the given stacksLocation directory
* using simple push operations that create first entries in the stack
* Stacks that do not contain any element upon the close() operation are removed
* @param stackFile
* @param stacksLocation
* @param stacksPrefix
* @throws IOException
*/
public Stacks(final File stacksLocation, final String stacksPrefix) {
@ -160,7 +161,7 @@ public class Stacks {
* this is only useful for internal processes where a special handle
* is created
* @param stack the name of the stack
* @param b the new stack element
* @param e the new stack element
* @return the handle used to store the new element
* @throws IOException
* @throws SpaceExceededException

@ -68,7 +68,6 @@ public abstract class TablesColumnIndex {
/**
* create an index for a given table and given columns
* @param columns - a map of column names and booleans for 'valueIsArray' you want to build an index for
* @param separator - a string value used to split column values into an array
* @param table - an iterator over table rows which should be added to the index
*/
public synchronized void buildIndex(final Map<String,String> columns, final Iterator<Tables.Row> table) {

@ -275,7 +275,7 @@ public class WordReferenceVars extends AbstractReference implements WordReferenc
/**
* Word positions for joined references (for multi word queries).
* @see posintext()
* @see #posintext()
* @return the word positions of the joined references
*/
@Override

@ -69,7 +69,9 @@ public final class RowHandleMap implements HandleMap, Iterable<Map.Entry<byte[],
* The class is used as index for database files
* @param keylength
* @param objectOrder
* @param space
* @param idxbytes
* @param expectedspace
* @param name
*/
public RowHandleMap(final int keylength, final ByteOrder objectOrder, final int idxbytes, final int expectedspace, final String name) {
this.rowdef = new Row(new Column[]{new Column("key", Column.celltype_binary, Column.encoder_bytes, keylength, "key"), new Column("long c-" + idxbytes + " {b256}")}, objectOrder);
@ -407,8 +409,9 @@ public final class RowHandleMap implements HandleMap, Iterable<Map.Entry<byte[],
* map creation will speed up the initialization process.
* @param keylength
* @param objectOrder
* @param space
* @param bufferSize
* @param name
* @param idxbytes
* @param expectedspace
* @return
*/
public final static initDataConsumer asynchronusInitializer(final String name, final int keylength, final ByteOrder objectOrder, final int idxbytes, final int expectedspace) {
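
Since this commit mainly renames the constructor parameters (idxbytes, expectedspace, name), a hedged construction example showing what each renamed value stands for; NaturalOrder.naturalOrder is assumed as the ByteOrder implementation and the numbers are illustrative only:

    // hypothetical construction of a RowHandleMap (implements HandleMap, see above)
    final HandleMap index = new RowHandleMap(
            12,                          // keylength: primary key size in bytes
            NaturalOrder.naturalOrder,   // objectOrder: byte order used for the index
            4,                           // idxbytes: byte width of the stored long value column
            1000,                        // expectedspace: expected number of entries
            "example-index");            // name: used for reporting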

@ -71,7 +71,6 @@ public class RowSet extends RowCollection implements Index, Iterable<Row.Entry>,
* import an exported collection
* @param rowdef
* @param exportedCollectionRowEnvironment
* @param columnInEnvironment
*/
public RowSet(final Row rowdef, final Row.Entry exportedCollectionRowEnvironment) {
super(rowdef, exportedCollectionRowEnvironment);
@ -158,7 +157,7 @@ public class RowSet extends RowCollection implements Index, Iterable<Row.Entry>,
/**
* Adds the row to the index. The row is identified by the primary key of the row.
* @param row a index row
* @param entry an index row
* @return true if this set did _not_ already contain the given row.
* @throws IOException
* @throws SpaceExceededException
@ -499,7 +498,8 @@ public class RowSet extends RowCollection implements Index, Iterable<Row.Entry>,
/**
* merge this row collection with another row collection using a simultaneous iteration of the input collections
* the current collection is not altered in any way, the returned collection is a new collection with copied content.
* @param c
* @param c0
* @param c1
* @return
* @throws SpaceExceededException
*/

@ -352,7 +352,7 @@ public class WebStructureGraph {
/**
* Compute outgoing references from the source host hash
* @param srcHostName reference source host hash
* @param hosthash reference source host hash
* @return outgoing structure with references mapped from target host hashes to counts or null when the host is not known
*/
public StructureEntry outgoingReferences(final String hosthash) {
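
outgoingReferences() returns null for hosts that are not known, so callers should check for that; a minimal sketch with a hypothetical WebStructureGraph instance named graph, assuming StructureEntry is the nested class of WebStructureGraph:

    // hypothetical lookup; "b0Q5ws" stands in for a real YaCy host hash
    final WebStructureGraph.StructureEntry out = graph.outgoingReferences("b0Q5ws");
    if (out == null) {
        System.out.println("host not known to the web structure graph");
    }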

@ -59,7 +59,6 @@ public final class BlacklistHelper {
* @param blacklistToUse the name of the blacklist the entry is to be added to
* @param entry the entry that is to be added
* @param header the current HTTP request headers
* @param supportedBlacklistTypes
* @return true when no error occurred and the entry was successfully added
*/
public static boolean addBlacklistEntry(
@ -100,7 +99,6 @@ public final class BlacklistHelper {
* @param blacklistToUse the name of the blacklist the entry is to be deleted from
* @param entry the entry that is to be deleted
* @param header
* @param supportedBlacklistTypes
* @return null if no error occurred, else a String to put into LOCATION
*/
public static String deleteBlacklistEntry(

@ -285,7 +285,7 @@ public class WebgraphConfiguration extends SchemaConfiguration implements Serial
* encode a string containing attributes from anchor rel properties binary:
* bit 0: "me" contained in rel
* bit 1: "nofollow" contained in rel
* @param rel
* @param rels
* @return binary encoded information about rel
*/
private static int relEval(final String rels) {

@ -390,7 +390,7 @@ public class TextSnippet implements Comparable<TextSnippet>, Comparator<TextSnip
/**
* Init a snippet line for urlhash
*
* @param urlhash hash of the url for this snippet
* @param url hash of the url for this snippet
* @param line text to use as snippet
* @param isMarked true if query words already marked in input text
* @param errorCode

@ -57,7 +57,7 @@ public final class serverClassLoader extends ClassLoader {
* This method is mainly to avoid classpath conflicts for servlet to servlet calls
* making inclusion of htroot in system classpath not crucial
*
* @param servletname (delivered by parent loader without ".class" file extension
* @param classname (delivered by parent loader without ".class" file extension)
* @return class in htroot
* @throws ClassNotFoundException
*/

@ -225,7 +225,7 @@ public class serverSwitch {
* default value which will be used if no value is found
* @return the public port of this system on its IPv4 address
*
* @see #getLocalPort(String, int)
* @see #getLocalPort()
*/
public int getPublicPort(final String key, final int dflt) {

@ -67,7 +67,8 @@ public class SignatureOutputStream extends FilterOutputStream {
/**
* write byte
* @see FilterOutputStream.write(int b)
* @param b
* @see FilterOutputStream#write(int)
*/
@Override
public void write(int b) throws IOException {

@ -165,7 +165,7 @@ public class TranslationManager extends TranslatorXliff {
* If a masterOutputFile exists, content is preserved (loaded first)
*
* @param localesFolder folder containing *.lng translation files
* @param masterOutpuFile output file (xliff format). Must not be null.
* @param masterOutputFile output file (xliff format). Must not be null.
* @throws IOException
*/
public void createMasterTranslationLists(final File localesFolder, final File masterOutputFile) throws IOException {
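
A hedged call sketch for the corrected parameter name; the folder and file names are placeholders, and a no-argument TranslationManager constructor is assumed (adapt the construction if the class requires arguments):

    // hypothetical usage; paths are examples only, the call is declared to throw IOException
    final TranslationManager tm = new TranslationManager();
    tm.createMasterTranslationLists(
            new File("locales"),                  // localesFolder: contains the *.lng translation files
            new File("locales/master.lng.xlf"));  // masterOutputFile: xliff output, preserved if it already exists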

@ -65,7 +65,7 @@ public class TranslatorXliff extends Translator {
/**
* Load translationLists for one language from a Xliff File.
*
* @param translationFile the File, which contains the Lists
* @param xliffFile the File, which contains the Lists
* @return a HashMap, which contains for each File a HashMap with
* translations.
*/
@ -296,9 +296,9 @@ public class TranslatorXliff extends Translator {
/**
* Saves the internal translation map as XLIFF 1.2 file
*
* @param targetLanguage the target language code, if null target is omitted
* @param targetLanguageCode the target language code, if null target is omitted
* in output file and only source text stored
* @param xliffFile name of the output XLIFF file (typically with .xlf
* @param lngFile name of the output XLIFF file (typically with .xlf
* extension)
* @param lng the YaCy translation for one language
*
@ -340,7 +340,7 @@ public class TranslatorXliff extends Translator {
/**
* Helper to make valid xml content text as text may contain html markup
* (the reverse on read is done automatically)
* @param html input string
* @param s input string
* @return xml string
*/
private String toXmlStr(String s) {
