migrated Solr 5.5 -> Solr 6.6 and from Java 1.7 -> 1.8

Also: now Version 1.921
pull/122/head
Michael Peter Christen 8 years ago
parent ce89492319
commit 6fe735945d

@ -35,8 +35,6 @@
<classpathentry kind="lib" path="lib/xmpcore-5.1.3.jar"/>
<classpathentry kind="lib" path="lib/jcifs-1.3.17.jar"/>
<classpathentry kind="lib" path="lib/guava-18.0.jar"/>
<classpathentry kind="lib" path="lib/spatial4j-0.5.jar"/>
<classpathentry kind="lib" path="lib/zookeeper-3.4.6.jar"/>
<classpathentry kind="lib" path="lib/org.restlet.jar"/>
<classpathentry kind="lib" path="lib/fontbox-2.0.6.jar"/>
<classpathentry kind="lib" path="lib/pdfbox-2.0.6.jar"/>
@ -64,26 +62,32 @@
<classpathentry kind="lib" path="lib/httpclient-4.5.3.jar"/>
<classpathentry kind="lib" path="lib/httpmime-4.5.3.jar"/>
<classpathentry kind="lib" path="lib/noggit-0.6.jar"/>
<classpathentry kind="lib" path="lib/solr-core-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/solr-solrj-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/stax2-api-3.1.4.jar"/>
<classpathentry kind="lib" path="lib/woodstox-core-asl-4.4.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-common-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-phonetic-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-classification-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-codecs-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-core-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-facet-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-grouping-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-highlighter-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-join-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-memory-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-misc-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-queries-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-queryparser-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-spatial-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-suggest-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/lucene-backward-codecs-5.5.4.jar"/>
<classpathentry kind="lib" path="lib/apache-mime4j-0.6.jar"/>
<classpathentry kind="lib" path="lib/commons-math3-3.4.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-common-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-phonetic-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-backward-codecs-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-classification-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-codecs-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-core-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-facet-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-grouping-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-highlighter-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-join-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-memory-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-misc-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-queries-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-queryparser-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-spatial-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-suggest-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/solr-core-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/solr-solrj-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/zookeeper-3.4.10.jar"/>
<classpathentry kind="lib" path="lib/metrics-core-3.2.2.jar"/>
<classpathentry kind="lib" path="lib/solr-dataimporthandler-6.6.0.jar"/>
<classpathentry kind="lib" path="lib/spatial4j-0.6.jar"/>
<classpathentry kind="con" path="org.eclipse.jdt.junit.JUNIT_CONTAINER/4"/>
<classpathentry kind="lib" path="lib/icu4j-58_2.jar"/>
<classpathentry kind="lib" path="lib/htmllexer.jar"/>

@ -6,9 +6,10 @@ org.eclipse.jdt.core.compiler.annotation.nonnullbydefault=org.eclipse.jdt.annota
org.eclipse.jdt.core.compiler.annotation.nullable=org.eclipse.jdt.annotation.Nullable
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
@ -96,4 +97,4 @@ org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=warning
org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedWarningToken=warning
org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=warning
org.eclipse.jdt.core.compiler.source=1.7
org.eclipse.jdt.core.compiler.source=1.8

@ -1,9 +1,9 @@
# defining some compiler arguments
javacSource=1.7
javacTarget=1.7
javacSource=1.8
javacTarget=1.8
# Release Configuration
releaseVersion=1.92
releaseVersion=1.921
stdReleaseFile=yacy${branch}_v${releaseVersion}_${DSTAMP}_${releaseNr}.tar.gz
sourceReleaseFile=yacy_src_v${releaseVersion}_${DSTAMP}_${releaseNr}.tar.gz
releaseFileParentDir=yacy

@ -170,6 +170,7 @@
<!-- when changing paths here, please also update the paths in /addon/YaCy.app/Contents/Info.plist -->
<path id="project.class.path">
<pathelement location="${build}" />
<pathelement location="${lib}/apache-mime4j-0.6.jar" />
<pathelement location="${lib}/bcmail-jdk15-1.46.jar" />
<pathelement location="${lib}/bcprov-jdk15-1.46.jar" />
<pathelement location="${lib}/chardet.jar" />
@ -183,6 +184,7 @@
<pathelement location="${lib}/commons-jxpath-1.3.jar" />
<pathelement location="${lib}/commons-lang-2.6.jar" />
<pathelement location="${lib}/commons-logging-1.2.jar" />
<pathelement location="${lib}/commons-math3-3.4.1.jar" />
<pathelement location="${lib}/fontbox-2.0.6.jar" />
<pathelement location="${lib}/guava-18.0.jar" />
<pathelement location="${lib}/htmllexer.jar" />
@ -216,48 +218,50 @@
<pathelement location="${lib}/jetty-xml-9.2.21.v20170120.jar" />
<pathelement location="${lib}/jsch-0.1.54.jar" />
<pathelement location="${lib}/json-simple-1.1.1.jar" />
<pathelement location="${lib}/jsonic-1.2.0.jar" />
<pathelement location="${lib}/jsoup-1.10.2.jar" />
<pathelement location="${lib}/jwat-archive-common-1.0.5.jar" />
<pathelement location="${lib}/jwat-common-1.0.5.jar" />
<pathelement location="${lib}/jwat-gzip-1.0.5.jar" />
<pathelement location="${lib}/jwat-archive-common-1.0.5.jar" />
<pathelement location="${lib}/jwat-warc-1.0.5.jar" />
<pathelement location="${lib}/log4j-over-slf4j-1.7.24.jar" />
<pathelement location="${lib}/lucene-analyzers-common-5.5.4.jar" />
<pathelement location="${lib}/lucene-analyzers-phonetic-5.5.4.jar" />
<pathelement location="${lib}/lucene-backward-codecs-5.5.4.jar" />
<pathelement location="${lib}/lucene-classification-5.5.4.jar" />
<pathelement location="${lib}/lucene-codecs-5.5.4.jar" />
<pathelement location="${lib}/lucene-core-5.5.4.jar" />
<pathelement location="${lib}/lucene-facet-5.5.4.jar" />
<pathelement location="${lib}/lucene-grouping-5.5.4.jar" />
<pathelement location="${lib}/lucene-highlighter-5.5.4.jar" />
<pathelement location="${lib}/lucene-join-5.5.4.jar" />
<pathelement location="${lib}/lucene-memory-5.5.4.jar" />
<pathelement location="${lib}/lucene-misc-5.5.4.jar" />
<pathelement location="${lib}/lucene-queries-5.5.4.jar" />
<pathelement location="${lib}/lucene-queryparser-5.5.4.jar" />
<pathelement location="${lib}/lucene-spatial-5.5.4.jar" />
<pathelement location="${lib}/lucene-suggest-5.5.4.jar" />
<pathelement location="${lib}/jsonic-1.2.0.jar" />
<pathelement location="${lib}/langdetect.jar" />
<pathelement location="${lib}/lucene-analyzers-common-6.6.0.jar" />
<pathelement location="${lib}/lucene-analyzers-phonetic-6.6.0.jar" />
<pathelement location="${lib}/lucene-backward-codecs-6.6.0.jar" />
<pathelement location="${lib}/lucene-classification-6.6.0.jar" />
<pathelement location="${lib}/lucene-codecs-6.6.0.jar" />
<pathelement location="${lib}/lucene-core-6.6.0.jar" />
<pathelement location="${lib}/lucene-facet-6.6.0.jar" />
<pathelement location="${lib}/lucene-grouping-6.6.0.jar" />
<pathelement location="${lib}/lucene-highlighter-6.6.0.jar" />
<pathelement location="${lib}/lucene-join-6.6.0.jar" />
<pathelement location="${lib}/lucene-memory-6.6.0.jar" />
<pathelement location="${lib}/lucene-misc-6.6.0.jar" />
<pathelement location="${lib}/lucene-queries-6.6.0.jar" />
<pathelement location="${lib}/lucene-queryparser-6.6.0.jar" />
<pathelement location="${lib}/lucene-spatial-6.6.0.jar" />
<pathelement location="${lib}/lucene-suggest-6.6.0.jar" />
<pathelement location="${lib}/metadata-extractor-2.10.1.jar" />
<pathelement location="${lib}/metrics-core-3.2.2.jar" />
<pathelement location="${lib}/noggit-0.6.jar" />
<pathelement location="${lib}/org.restlet.jar" />
<pathelement location="${lib}/langdetect.jar" />
<pathelement location="${lib}/pdfbox-2.0.6.jar" />
<pathelement location="${lib}/poi-3.15.jar" />
<pathelement location="${lib}/poi-scratchpad-3.15.jar" />
<pathelement location="${lib}/slf4j-api-1.7.24.jar" />
<pathelement location="${lib}/slf4j-jdk14-1.7.24.jar" />
<pathelement location="${lib}/solr-core-5.5.4.jar" />
<pathelement location="${lib}/solr-solrj-5.5.4.jar" />
<pathelement location="${lib}/spatial4j-0.5.jar" />
<pathelement location="${lib}/solr-core-6.6.0.jar" />
<pathelement location="${lib}/solr-dataimporthandler-6.6.0.jar" />
<pathelement location="${lib}/solr-solrj-6.6.0.jar" />
<pathelement location="${lib}/spatial4j-0.6.jar" />
<pathelement location="${lib}/stax2-api_3.1.4.jar" />
<pathelement location="${lib}/weupnp-0.1.4.jar" />
<pathelement location="${lib}/woodstox-core-asl-4.4.1.jar" />
<pathelement location="${lib}/xercesImpl.jar" />
<pathelement location="${lib}/xml-apis.jar" />
<pathelement location="${lib}/xmpcore-5.1.3.jar" />
<pathelement location="${lib}/zookeeper-3.4.6.jar" />
<pathelement location="${lib}/zookeeper-3.4.10.jar" />
</path>
<target name="compile-core" depends="init" description="compile YaCy core">

@ -111,10 +111,6 @@
</fields>
<uniqueKey>id</uniqueKey>
<!-- The defaultSearchField element is deprecated in Solr versions 3.6 and higher. Instead, the df request parameter should be used. -->
<defaultSearchField>text_t</defaultSearchField>
<!-- The default operator parameter has been deprecated in Solr versions 3.6 and higher. Instead, the query parser q.op parameter should be used in the request handler. -->
<solrQueryParser defaultOperator="AND"/>
<!-- if you are using a search client using the default search field "text", then use this line to get to all indexed documents -->
<!-- <copyField source="*_t" dest="text" maxChars="3000"/> -->

@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
<luceneMatchVersion>5.5.0</luceneMatchVersion>
<luceneMatchVersion>6.6.0</luceneMatchVersion>
<!-- <lib/> directives can be used to instruct Solr to load any Jars
identified and use them to resolve any "plugins" specified in
@ -72,19 +72,16 @@
The examples below can be used to load some solr-contribs along
with their external dependencies.
-->
<!--
<lib dir="../../../contrib/extraction/lib" regex=".*\.jar" />
<lib dir="../../../dist/" regex="solr-cell-\d.*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
<lib dir="../../../contrib/clustering/lib/" regex=".*\.jar" />
<lib dir="../../../dist/" regex="solr-clustering-\d.*\.jar" />
<lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />
<lib dir="../../../contrib/langid/lib/" regex=".*\.jar" />
<lib dir="../../../dist/" regex="solr-langid-\d.*\.jar" />
<lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />
<lib dir="../../../contrib/velocity/lib" regex=".*\.jar" />
<lib dir="../../../dist/" regex="solr-velocity-\d.*\.jar" />
-->
<lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />
<!-- an exact 'path' can be used instead of a 'dir' to specify a
specific jar file. This will cause a serious error to be logged
@ -119,23 +116,7 @@
persistent, and doesn't work with replication.
-->
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}">
<!-- These will be used if you are using the solr.HdfsDirectoryFactory,
otherwise they will be ignored. If you don't plan on using hdfs,
you can safely remove this section. -->
<!-- The root directory that collection data should be written to. -->
<str name="solr.hdfs.home">${solr.hdfs.home:}</str>
<!-- The hadoop configuration files to use for the hdfs client. -->
<str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
<!-- Enable/Disable the hdfs cache. -->
<str name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</str>
<!-- Enable/Disable using one global cache for all SolrCores.
The settings used will be from the first HdfsDirectoryFactory created. -->
<str name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</str>
</directoryFactory>
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
<!-- The CodecFactory for defining the format of the inverted index.
The default implementation is SchemaCodecFactory, which is the official Lucene
@ -148,33 +129,6 @@
-->
<codecFactory class="solr.SchemaCodecFactory"/>
<!-- To enable dynamic schema REST APIs, use the following for <schemaFactory>:
<schemaFactory class="ManagedIndexSchemaFactory">
<bool name="mutable">true</bool>
<str name="managedSchemaResourceName">managed-schema</str>
</schemaFactory>
When ManagedIndexSchemaFactory is specified, Solr will load the schema from
the resource named in 'managedSchemaResourceName', rather than from schema.xml.
Note that the managed schema resource CANNOT be named schema.xml. If the managed
schema does not exist, Solr will create it after reading schema.xml, then rename
'schema.xml' to 'schema.xml.bak'.
Do NOT hand edit the managed schema - external modifications will be ignored and
overwritten as a result of schema modification REST API calls.
When ManagedIndexSchemaFactory is specified with mutable = true, schema
modification REST API calls will be allowed; otherwise, error responses will be
sent back for these requests.
-->
<schemaFactory class="ClassicIndexSchemaFactory"/>
<!-- using MMapDirectoryFactory as solution for ClosedChannelException
given in https://issues.apache.org/jira/browse/SOLR-2247
this feature is now switched on in the startYACY.sh file using a -D option
-->
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Index Config - These settings control low-level behavior of indexing
Most example settings here show the default value, but are commented
@ -188,18 +142,12 @@
<filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
-->
<!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
<writeLockTimeout>20000</writeLockTimeout>
<!-- The maximum number of simultaneous threads that may be
indexing documents at once in IndexWriter; if more than this
many threads arrive they will wait for others to finish.
Default in Solr/Lucene is 8. -->
<!-- <maxIndexingThreads>8</maxIndexingThreads> -->
<!-- <writeLockTimeout>1000</writeLockTimeout> -->
<!-- Expert: Enabling compound file will use fewer files for the index,
using fewer file descriptors at the expense of decreased performance.
Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
<useCompoundFile>true</useCompoundFile>
<!-- <useCompoundFile>false</useCompoundFile> -->
<!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
indexing for buffering added documents and deletions before they are
@ -209,8 +157,8 @@
If both ramBufferSizeMB and maxBufferedDocs is set, then
Lucene will flush based on whichever limit is hit first.
The default is 100 MB. -->
<ramBufferSizeMB>32</ramBufferSizeMB>
<maxBufferedDocs>100</maxBufferedDocs>
<!-- <ramBufferSizeMB>100</ramBufferSizeMB> -->
<!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
<!-- Expert: Merge Policy
The Merge Policy in Lucene controls how merging of segments is done.
@ -219,22 +167,10 @@
Even older versions of Lucene used LogDocMergePolicy.
-->
<!--
<mergePolicy class="org.apache.lucene.index.TieredMergePolicy">
<mergePolicyFactory class="solr.TieredMergePolicyFactory">
<int name="maxMergeAtOnce">10</int>
<int name="segmentsPerTier">10</int>
</mergePolicy>
-->
<!-- Merge Factor
The merge factor controls how many segments will get merged at a time.
For TieredMergePolicy, mergeFactor is a convenience parameter which
will set both MaxMergeAtOnce and SegmentsPerTier at once.
For LogByteSizeMergePolicy, mergeFactor decides how many new segments
will be allowed before they are merged into one.
Default is 10 for both merge policies.
-->
<!--
<mergeFactor>10</mergeFactor>
</mergePolicyFactory>
-->
<!-- Expert: Merge Scheduler
@ -268,33 +204,6 @@
-->
<lockType>${solr.lock.type:native}</lockType>
<!-- Unlock On Startup
If true, unlock any held write or commit locks on startup.
This defeats the locking mechanism that allows multiple
processes to safely access a lucene index, and should be used
with care. Default is "false".
This is not needed if lock type is 'single'
-->
<!--
<unlockOnStartup>false</unlockOnStartup>
-->
<!-- Expert: Controls how often Lucene loads terms into memory
Default is 128 and is likely good for most everyone.
-->
<!-- <termIndexInterval>128</termIndexInterval> -->
<!-- If true, IndexReaders will be opened/reopened from the IndexWriter
instead of from the Directory. Hosts in a master/slave setup
should have this set to false while those in a SolrCloud
cluster need to be set to true. Default: true
-->
<!--
<nrtMode>true</nrtMode>
-->
<!-- Commit Deletion Policy
Custom deletion policies can be specified here. The class must
implement org.apache.lucene.index.IndexDeletionPolicy.
@ -335,13 +244,6 @@
this is enabled here, and controlled through log4j.properties.
-->
<infoStream>true</infoStream>
<!--
Use true to enable this safety check, which can help
reduce the risk of propagating index corruption from older segments
into new ones, at the expense of slower merging.
-->
<checkIntegrityAtMerge>false</checkIntegrityAtMerge>
</indexConfig>
@ -399,7 +301,7 @@
-->
<autoCommit>
<maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
<openSearcher>true</openSearcher>
<openSearcher>false</openSearcher>
</autoCommit>
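
With openSearcher now false, the periodic hard auto-commit only flushes documents to stable storage; it no longer opens a new searcher, so visibility of new documents has to come from soft commits or explicit client commits. A minimal SolrJ sketch of the two commit flavors (the URL and core name are placeholders, not taken from this commit):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class CommitExample {
    public static void main(String[] args) throws Exception {
        // hypothetical local core; adjust the URL for a real installation
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            // hard commit: durable, flushes segments to disk
            client.commit(true /*waitFlush*/, true /*waitSearcher*/, false /*softCommit*/);
            // soft commit: cheap, makes recent updates searchable without an fsync
            client.commit(true, true, true /*softCommit*/);
        }
    }
}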
<!-- softAutoCommit is like autoCommit except it causes a
@ -473,15 +375,6 @@
<str name="someArg">Some Value</str>
</indexReaderFactory >
-->
<!-- By explicitly declaring the Factory, the termIndexDivisor can
be specified.
-->
<!--
<indexReaderFactory name="IndexReaderFactory"
class="solr.StandardIndexReaderFactory">
<int name="setTermIndexDivisor">12</int>
</indexReaderFactory >
-->
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Query section - these settings control query time things like caches
@ -535,8 +428,8 @@
and old cache.
-->
<filterCache class="solr.FastLRUCache"
size="100"
initialSize="100"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- Query Result Cache
@ -545,8 +438,8 @@
(DocList) based on a query, a sort, and the range of documents requested.
-->
<queryResultCache class="solr.LRUCache"
size="100"
initialSize="100"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- Document Cache
@ -556,8 +449,8 @@
this cache will not be autowarmed.
-->
<documentCache class="solr.LRUCache"
size="64"
initialSize="64"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- custom cache currently used by block join -->
@ -574,10 +467,12 @@
by document id. The fieldValueCache is created by default
even if not configured here.
-->
<!--
<fieldValueCache class="solr.FastLRUCache"
size="64"
autowarmCount="0"
size="512"
autowarmCount="128"
showItems="32" />
-->
<!-- Custom Cache
@ -684,18 +579,7 @@
warming searcher and use it. If "false" then all requests
will block until the first searcher is done warming.
-->
<useColdSearcher>true</useColdSearcher>
<!-- Max Warming Searchers
Maximum number of searchers that may be warming in the
background concurrently. An error is returned if this limit
is exceeded.
Recommend values of 1-2 for read-only slaves, higher for
masters w/o cache warming.
-->
<maxWarmingSearchers>2</maxWarmingSearchers>
<useColdSearcher>false</useColdSearcher>
</query>
@ -829,6 +713,13 @@
not be initialized until the first request that uses it.
-->
<requestHandler name="/dataimport" class="solr.DataImportHandler">
<lst name="defaults">
<str name="config">solr-data-config.xml</str>
</lst>
</requestHandler>
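
This commit also wires in Solr's DataImportHandler (hence the new solr-dataimporthandler-6.6.0.jar above), configured from solr-data-config.xml. A hedged SolrJ sketch for triggering an import through that handler; the URL, core name, and the full-import command follow standard DIH usage and are not part of this commit:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class DataImportTrigger {
    public static void main(String[] args) throws Exception {
        // hypothetical core URL; "/dataimport" is the handler registered above
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            SolrQuery q = new SolrQuery();
            q.setRequestHandler("/dataimport"); // routes the request to the DIH
            q.set("command", "full-import");    // standard DIH command
            QueryResponse rsp = client.query(q);
            System.out.println(rsp.getResponse()); // DIH status document
        }
    }
}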
<!-- SearchHandler
http://wiki.apache.org/solr/SearchHandler
@ -838,14 +729,14 @@
of SearchComponents (see below) and supports distributed
queries across multiple shards
-->
<requestHandler name="/select" class="solr.SearchHandler" startup="lazy">
<requestHandler name="/select" class="solr.SearchHandler">
<!-- default values for query parameters can be specified, these
will be overridden by parameters in the request
-->
<lst name="defaults">
<str name="echoParams">explicit</str>
<int name="rows">10</int>
<str name="df">text_t</str>
<str name="df">text</str>
</lst>
<!-- In addition to defaults, "appends" params can be specified
to identify values which should be appended to the list of
@ -902,94 +793,50 @@
</requestHandler>
<!-- A request handler that returns indented JSON by default -->
<requestHandler name="/query" class="solr.SearchHandler" startup="lazy">
<requestHandler name="/query" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="wt">json</str>
<str name="indent">true</str>
<str name="df">text_t</str>
<str name="df">text</str>
</lst>
</requestHandler>
<!-- A Robust Example
<!-- realtime get handler, guaranteed to return the latest stored fields of
any document, without the need to commit or open a new searcher. The
current implementation relies on the updateLog feature being enabled.
This example SearchHandler declaration shows off usage of the
SearchHandler with many defaults declared
** WARNING **
Do NOT disable the realtime get handler at /get if you are using
SolrCloud otherwise any leader election will cause a full sync in ALL
replicas for the shard in question. Similarly, a replica recovery will
also always fetch the complete index from the leader because a partial
sync will not be possible in the absence of this handler.
Note that multiple instances of the same Request Handler
(SearchHandler) can be registered multiple times with different
names (and different init parameters)
-->
<requestHandler name="/get" class="solr.RealTimeGetHandler" startup="lazy">
<requestHandler name="/browse" class="solr.SearchHandler">
<lst name="defaults">
<str name="omitHeader">true</str>
<str name="wt">json</str>
<str name="indent">true</str>
</lst>
</requestHandler>
<!--
The export request handler is used to export full sorted result sets.
Do not change these defaults.
-->
<requestHandler name="/export" class="solr.SearchHandler" startup="lazy">
<lst name="invariants">
<str name="rq">{!xport}</str>
<str name="wt">xsort</str>
<str name="distrib">false</str>
</lst>
<arr name="components">
<str>query</str>
</arr>
</requestHandler>
<!-- Update Request Handler.
http://wiki.apache.org/solr/UpdateXmlMessages
The canonical Request Handler for Modifying the Index through
commands specified using XML, JSON, CSV, or JAVABIN
<str name="echoParams">explicit</str>
Note: Since Solr 1.1, requestHandlers require a valid content
type header if posted in the body. For example, curl now
requires: -H 'Content-type:text/xml; charset=utf-8'
<!-- VelocityResponseWriter settings -->
<str name="wt">velocity</str>
<str name="v.template">browse</str>
<str name="v.layout">layout</str>
To override the request content type and force a specific
Content-type, use the request parameter:
?update.contentType=text/csv
<!-- Query settings -->
<str name="defType">edismax</str>
<str name="q.alt">*:*</str>
<str name="rows">10</str>
<str name="fl">*,score</str>
This handler will pick a response format to match the input
if the 'wt' parameter is not explicit
-->
<requestHandler name="/update" class="solr.UpdateRequestHandler" startup="lazy">
<!-- See below for information on defining
updateRequestProcessorChains that can be used by name
on each Update Request
-->
<!--
<lst name="defaults">
<str name="update.chain">dedupe</str>
<!-- Faceting defaults -->
<str name="facet">on</str>
<str name="facet.mincount">1</str>
</lst>
-->
</requestHandler>
<!-- The following are implicitly added
<requestHandler name="/update/json" class="solr.UpdateRequestHandler">
<lst name="defaults">
<str name="stream.contentType">application/json</str>
</lst>
</requestHandler>
<requestHandler name="/update/csv" class="solr.UpdateRequestHandler">
<initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell,/browse">
<lst name="defaults">
<str name="stream.contentType">application/csv</str>
<str name="df">text</str>
</lst>
</requestHandler>
-->
</initParams>
<!-- Solr Cell Update Request Handler
@ -1009,156 +856,6 @@
<str name="fmap.div">ignored_</str>
</lst>
</requestHandler>
<!-- Field Analysis Request Handler
RequestHandler that provides much the same functionality as
analysis.jsp. Provides the ability to specify multiple field
types and field names in the same request and outputs
index-time and query-time analysis for each of them.
Request parameters are:
analysis.fieldname - field name whose analyzers are to be used
analysis.fieldtype - field type whose analyzers are to be used
analysis.fieldvalue - text for index-time analysis
q (or analysis.q) - text for query time analysis
analysis.showmatch (true|false) - When set to true and when
query analysis is performed, the produced tokens of the
field value analysis will be marked as "matched" for every
token that is produced by the query analysis
-->
<requestHandler name="/analysis/field"
startup="lazy"
class="solr.FieldAnalysisRequestHandler" />
<!-- Document Analysis Handler
http://wiki.apache.org/solr/AnalysisRequestHandler
An analysis handler that provides a breakdown of the analysis
process of provided documents. This handler expects a (single)
content stream with the following format:
<docs>
<doc>
<field name="id">1</field>
<field name="name">The Name</field>
<field name="text">The Text Value</field>
</doc>
<doc>...</doc>
<doc>...</doc>
...
</docs>
Note: Each document must contain a field which serves as the
unique key. This key is used in the returned response to associate
an analysis breakdown to the analyzed document.
Like the FieldAnalysisRequestHandler, this handler also supports
query analysis by sending either an "analysis.query" or "q"
request parameter that holds the query text to be analyzed. It
also supports the "analysis.showmatch" parameter which when set to
true, all field tokens that match the query tokens will be marked
as a "match".
-->
<requestHandler name="/analysis/document"
class="solr.DocumentAnalysisRequestHandler"
startup="lazy" />
<!-- Admin Handlers
Admin Handlers - This will register all the standard admin
RequestHandlers.
<requestHandler name="/admin/"
class="solr.admin.AdminHandlers" />
-->
<!-- This single handler is equivalent to the following... -->
<!--
<requestHandler name="/admin/luke" class="solr.admin.LukeRequestHandler" />
<requestHandler name="/admin/system" class="solr.admin.SystemInfoHandler" />
<requestHandler name="/admin/plugins" class="solr.admin.PluginInfoHandler" />
<requestHandler name="/admin/threads" class="solr.admin.ThreadDumpHandler" />
<requestHandler name="/admin/properties" class="solr.admin.PropertiesRequestHandler" />
<requestHandler name="/admin/file" class="solr.admin.ShowFileRequestHandler" >
-->
<!-- If you wish to hide files under ${solr.home}/conf, explicitly
register the ShowFileRequestHandler using the definition below.
NOTE: The glob pattern ('*') is the only pattern supported at present, *.xml will
not exclude all files ending in '.xml'. Use it to exclude _all_ updates
-->
<!--
<requestHandler name="/admin/file"
class="solr.admin.ShowFileRequestHandler" >
<lst name="invariants">
<str name="hidden">synonyms.txt</str>
<str name="hidden">anotherfile.txt</str>
<str name="hidden">*</str>
</lst>
</requestHandler>
-->
<!-- ping/healthcheck -->
<requestHandler name="/admin/ping" class="solr.PingRequestHandler" startup="lazy">
<lst name="invariants">
<str name="q">solrpingquery</str>
</lst>
<lst name="defaults">
<str name="echoParams">all</str>
</lst>
<!-- An optional feature of the PingRequestHandler is to configure the
handler with a "healthcheckFile" which can be used to enable/disable
the PingRequestHandler.
relative paths are resolved against the data dir
-->
<!-- <str name="healthcheckFile">server-enabled.txt</str> -->
</requestHandler>
<!-- Echo the request contents back to the client -->
<requestHandler name="/debug/dump" class="solr.DumpRequestHandler" startup="lazy">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="echoHandler">true</str>
</lst>
</requestHandler>
<!-- Solr Replication
The SolrReplicationHandler supports replicating indexes from a
"master" used for indexing and "slaves" used for queries.
http://wiki.apache.org/solr/SolrReplication
It is also necessary for SolrCloud to function (in Cloud mode, the
replication handler is used to bulk transfer segments when nodes
are added or need to recover).
https://wiki.apache.org/solr/SolrCloud/
-->
<requestHandler name="/replication" class="solr.ReplicationHandler" startup="lazy">
<!--
To enable simple master/slave replication, uncomment one of the
sections below, depending on whether this solr instance should be
the "master" or a "slave". If this instance is a "slave" you will
also need to fill in the masterUrl to point to a real machine.
-->
<!--
<lst name="master">
<str name="replicateAfter">commit</str>
<str name="replicateAfter">startup</str>
<str name="confFiles">schema.xml,stopwords.txt</str>
</lst>
-->
<!--
<lst name="slave">
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="pollInterval">00:00:60</str>
</lst>
-->
</requestHandler>
<!-- Search Components
Search components are registered to SolrCore and used by
@ -1202,123 +899,171 @@
-->
<!-- Term Vector Component
<!-- Spell Check
http://wiki.apache.org/solr/TermVectorComponent
The spell check component can return a list of alternative spelling
suggestions.
http://wiki.apache.org/solr/SpellCheckComponent
-->
<searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
<searchComponent name="spellcheck" class="solr.SpellCheckComponent">
<!-- A request handler for demonstrating the term vector component
<str name="queryAnalyzerFieldType">text_general</str>
This is purely as an example.
<!-- Multiple "Spell Checkers" can be declared and used by this
component
-->
In reality you will likely want to add the component to your
already specified request handlers.
<!-- a spellchecker built from a field of the main index -->
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">text</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<!-- the spellcheck distance measure used, the default is the internal levenshtein -->
<str name="distanceMeasure">internal</str>
<!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
<float name="accuracy">0.5</float>
<!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
<int name="maxEdits">2</int>
<!-- the minimum shared prefix when enumerating terms -->
<int name="minPrefix">1</int>
<!-- maximum number of inspections per result. -->
<int name="maxInspections">5</int>
<!-- minimum length of a query term to be considered for correction -->
<int name="minQueryLength">4</int>
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
-->
<requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="df">text_t</str>
<bool name="tv">true</bool>
</lst>
<arr name="last-components">
<str>tvComponent</str>
</arr>
</requestHandler>
<!-- Clustering Component
You'll need to set the solr.clustering.enabled system property
when running solr to run with clustering enabled:
java -Dsolr.clustering.enabled=true -jar start.jar
<!-- a spellchecker that can break or combine words. See "/spell" handler below for usage -->
<lst name="spellchecker">
<str name="name">wordbreak</str>
<str name="classname">solr.WordBreakSolrSpellChecker</str>
<str name="field">name</str>
<str name="combineWords">true</str>
<str name="breakWords">true</str>
<int name="maxChanges">10</int>
</lst>
http://wiki.apache.org/solr/ClusteringComponent
http://carrot2.github.io/solr-integration-strategies/
<!-- a spellchecker that uses a different distance measure -->
<!--
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">spell</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="distanceMeasure">
org.apache.lucene.search.spell.JaroWinklerDistance
</str>
</lst>
-->
<searchComponent name="clustering"
enable="${solr.clustering.enabled:false}"
class="solr.clustering.ClusteringComponent" >
<lst name="engine">
<str name="name">lingo</str>
<!-- Class name of a clustering algorithm compatible with the Carrot2 framework.
Currently available open source algorithms are:
* org.carrot2.clustering.lingo.LingoClusteringAlgorithm
* org.carrot2.clustering.stc.STCClusteringAlgorithm
* org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
<!-- a spellchecker that uses an alternate comparator
See http://project.carrot2.org/algorithms.html for more information.
comparatorClass can be one of:
1. score (default)
2. freq (Frequency first, then score)
3. A fully qualified class name
-->
<!--
<lst name="spellchecker">
<str name="name">freq</str>
<str name="field">lowerfilt</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="comparatorClass">freq</str>
-->
A commercial algorithm Lingo3G (needs to be installed separately) is defined as:
* com.carrotsearch.lingo3g.Lingo3GClusteringAlgorithm
<!-- A spellchecker that reads the list of words from a file -->
<!--
<lst name="spellchecker">
<str name="classname">solr.FileBasedSpellChecker</str>
<str name="name">file</str>
<str name="sourceLocation">spellings.txt</str>
<str name="characterEncoding">UTF-8</str>
<str name="spellcheckIndexDir">spellcheckerFile</str>
</lst>
-->
<str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>
</searchComponent>
<!-- Override location of the clustering algorithm's resources
(attribute definitions and lexical resources).
<!-- A request handler for demonstrating the spellcheck component.
A directory from which to load algorithm-specific stop words,
stop labels and attribute definition XMLs.
NOTE: This is purely as an example. The whole purpose of the
SpellCheckComponent is to hook it into the request handler that
handles your normal user queries so that a separate request is
not needed to get suggestions.
For an overview of Carrot2 lexical resources, see:
http://download.carrot2.org/head/manual/#chapter.lexical-resources
IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
For an overview of Lingo3G lexical resources, see:
http://download.carrotsearch.com/lingo3g/manual/#chapter.lexical-resources
See http://wiki.apache.org/solr/SpellCheckComponent for details
on the request parameters.
-->
<str name="carrot.resourcesDir">clustering/carrot2</str>
</lst>
<!-- An example definition for the STC clustering algorithm. -->
<lst name="engine">
<str name="name">stc</str>
<str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
<requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="df">text</str>
<!-- Solr will use suggestions from both the 'default' spellchecker
and from the 'wordbreak' spellchecker and combine them.
collations (re-written queries) can include a combination of
corrections from both spellcheckers -->
<str name="spellcheck.dictionary">default</str>
<str name="spellcheck.dictionary">wordbreak</str>
<str name="spellcheck">on</str>
<str name="spellcheck.extendedResults">true</str>
<str name="spellcheck.count">10</str>
<str name="spellcheck.alternativeTermCount">5</str>
<str name="spellcheck.maxResultsForSuggest">5</str>
<str name="spellcheck.collate">true</str>
<str name="spellcheck.collateExtendedResults">true</str>
<str name="spellcheck.maxCollationTries">10</str>
<str name="spellcheck.maxCollations">5</str>
</lst>
<!-- An example definition for the bisecting kmeans clustering algorithm. -->
<lst name="engine">
<str name="name">kmeans</str>
<str name="carrot.algorithm">org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm</str>
<arr name="last-components">
<str>spellcheck</str>
</arr>
</requestHandler>
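
The new /spell handler combines the 'default' and 'wordbreak' dictionaries declared above. A minimal sketch of querying it from SolrJ, assuming a local core (the core name and the sample query are illustrative only):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SpellExample {
    public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            SolrQuery q = new SolrQuery("helo wrld"); // intentionally misspelled
            q.setRequestHandler("/spell");
            QueryResponse rsp = client.query(q);
            // collations are re-written queries built from both dictionaries
            System.out.println(rsp.getSpellCheckResponse().getCollatedResult());
        }
    }
}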
<searchComponent name="suggest" class="solr.SuggestComponent">
<lst name="suggester">
<str name="name">mySuggester</str>
<str name="lookupImpl">FuzzyLookupFactory</str> <!-- org.apache.solr.spelling.suggest.fst -->
<str name="dictionaryImpl">DocumentDictionaryFactory</str> <!-- org.apache.solr.spelling.suggest.HighFrequencyDictionaryFactory -->
<str name="field">cat</str>
<str name="weightField">price</str>
<str name="suggestAnalyzerFieldType">string</str>
</lst>
</searchComponent>
<!-- A request handler for demonstrating the clustering component
<requestHandler name="/suggest" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="suggest">true</str>
<str name="suggest.count">10</str>
</lst>
<arr name="components">
<str>suggest</str>
</arr>
</requestHandler>
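
Note that the suggester above is built from the 'cat' field weighted by 'price' (field names from the stock Solr example schema, not YaCy's). A sketch of calling the /suggest handler; the prefix and core URL are placeholders:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class SuggestExample {
    public static void main(String[] args) throws Exception {
        try (SolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr/collection1").build()) {
            SolrQuery q = new SolrQuery();
            q.setRequestHandler("/suggest");
            q.set("suggest.dictionary", "mySuggester"); // declared in the component above
            q.set("suggest.q", "ela");                  // prefix to complete
            System.out.println(client.query(q).getResponse().get("suggest"));
        }
    }
}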
<!-- Term Vector Component
http://wiki.apache.org/solr/TermVectorComponent
-->
<searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
<!-- A request handler for demonstrating the term vector component
This is purely as an example.
In reality you will likely want to add the component to your
already specified request handlers.
-->
<requestHandler name="/clustering"
startup="lazy"
enable="${solr.clustering.enabled:false}"
class="solr.SearchHandler">
<requestHandler name="/tvrh" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<bool name="clustering">true</bool>
<bool name="clustering.results">true</bool>
<!-- Field name with the logical "title" of a each document (optional) -->
<str name="carrot.title">name</str>
<!-- Field name with the logical "URL" of a each document (optional) -->
<str name="carrot.url">id</str>
<!-- Field name with the logical "content" of a each document (optional) -->
<str name="carrot.snippet">features</str>
<!-- Apply highlighter to the title/ content and use this for clustering. -->
<bool name="carrot.produceSummary">true</bool>
<!-- the maximum number of labels per cluster -->
<!--<int name="carrot.numDescriptions">5</int>-->
<!-- produce sub clusters -->
<bool name="carrot.outputSubClusters">false</bool>
<!-- Configure the remaining request handler parameters. -->
<str name="defType">edismax</str>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
</str>
<str name="q.alt">*:*</str>
<str name="rows">10</str>
<str name="fl">*,score</str>
<str name="df">text</str>
<bool name="tv">true</bool>
</lst>
<arr name="last-components">
<str>clustering</str>
<str>tvComponent</str>
</arr>
</requestHandler>
@ -1361,7 +1106,7 @@
<requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="df">text_t</str>
<str name="df">text</str>
</lst>
<arr name="last-components">
<str>elevator</str>
@ -1584,8 +1329,9 @@
<!--
Custom response writers can be declared as needed...
-->
<queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy"/>
<queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" startup="lazy">
<str name="template.base.dir">${velocity.template.base.dir:}</str>
</queryResponseWriter>
<!-- XSLT response writer transforms the XML output by any xslt file found
in Solr's conf/xslt directory. Changes to xslt files are checked for
@ -1597,7 +1343,7 @@
<!-- Query Parsers
http://wiki.apache.org/solr/SolrQuerySyntax
https://cwiki.apache.org/confluence/display/solr/Query+Syntax+and+Parsing
Multiple QParserPlugins can be registered by name, and then
used in either the "defType" param for the QueryComponent (used
@ -1640,13 +1386,10 @@
<transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
<double name="defaultValue">5</double>
</transformer>
If you are using the QueryElevationComponent, you may wish to mark documents that get boosted. The
EditorialMarkerFactory will do exactly that:
<transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
-->
<!-- Legacy config for the admin interface -->
<admin>
<defaultQuery>*:*</defaultQuery>
</admin>
</config>

@ -22,15 +22,13 @@
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.time.Instant;
import java.util.AbstractMap;
import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import org.apache.solr.util.DateFormatUtil;
import net.yacy.cora.date.AbstractFormatter;
import net.yacy.cora.document.analysis.Classification.ContentDomain;
import net.yacy.cora.document.id.MultiProtocolURL;
@ -260,15 +258,14 @@ public class yacysearchtrailer {
if (name.length() < 10) continue;
count = theSearch.dateNavigator.get(name);
String shortname = name.substring(0, 10);
long d;
Date dd;
try {dd = DateFormatUtil.parseDate(name); d = dd.getTime();} catch (ParseException e) {continue;}
long d = Instant.parse(name).toEpochMilli();
Date dd = new Date(d);
if (fromconstraint != null && dd.before(fromconstraint)) continue;
if (toconstraint != null && dd.after(toconstraint)) break;
if (dx > 0) {
while (d - dx > AbstractFormatter.dayMillis) {
dx += AbstractFormatter.dayMillis;
String sn = DateFormatUtil.formatExternal(new Date(dx)).substring(0, 10);
String sn = new Date(dx).toInstant().toString().substring(0, 10);
prop.put("nav-dates_element_" + i + "_on", 0);
prop.put(fileType, "nav-dates_element_" + i + "_name", sn);
prop.put("nav-dates_element_" + i + "_count", 0);

34 binary files not shown.

@ -1,400 +0,0 @@
/**
* ConcurrentUpdateSolrConnector
* Copyright 2013 by Michael Peter Christen
* First released 28.04.2013 at http://yacy.net
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program in the file lgpl21.txt
* If not, see <http://www.gnu.org/licenses/>.
*/
package net.yacy.cora.federate.solr.connector;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import net.yacy.cora.sorting.ReversibleScoreMap;
import net.yacy.cora.storage.ARC;
import net.yacy.cora.storage.ConcurrentARC;
import net.yacy.cora.util.ConcurrentLog;
import net.yacy.kelondro.data.word.Word;
import net.yacy.kelondro.util.MemoryControl;
import net.yacy.search.schema.CollectionSchema;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.util.ClientUtils;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
/**
* The purpose of this connector is to provide a non-blocking interface to solr.
* All time-consuming tasks like updates and deletions are done within a concurrent process
* which is started for this class in the background.
* To implement this, we introduce an id exist cache, a deletion id queue and an update document queue.
*/
public class ConcurrentUpdateSolrConnector implements SolrConnector {
private final static long AUTOCOMMIT = 3000; // milliseconds
private class CommitHandler extends Thread {
@Override
public void run() {
try {
while (ConcurrentUpdateSolrConnector.this.commitProcessRunning) {
commitDocBuffer();
try {Thread.sleep(AUTOCOMMIT);} catch (final InterruptedException e) {
ConcurrentLog.logException(e);
}
}
} finally {
commitDocBuffer();
}
}
}
private SolrConnector connector;
private ARC<String, LoadTimeURL> metadataCache;
//private final ARH<String> missCache;
private final LinkedHashMap<String, SolrInputDocument> docBuffer;
private CommitHandler processHandler;
private final int updateCapacity;
private boolean commitProcessRunning;
public ConcurrentUpdateSolrConnector(final SolrConnector connector, final int updateCapacity, final int idCacheCapacity, final int concurrency) {
this.connector = connector;
this.updateCapacity = updateCapacity;
this.metadataCache = new ConcurrentARC<>(idCacheCapacity, concurrency);
//this.missCache = new ConcurrentARH<>(idCacheCapacity, concurrency);
this.docBuffer = new LinkedHashMap<>();
this.processHandler = null;
this.commitProcessRunning = true;
ensureAliveProcessHandler();
}
@Override
public int hashCode() {
return this.connector.hashCode();
}
@Override
public boolean equals(Object o) {
return o instanceof ConcurrentUpdateSolrConnector && this.connector.equals(((ConcurrentUpdateSolrConnector) o).connector);
}
private void commitDocBuffer() {
synchronized (this.docBuffer) {
//System.out.println("*** commit of " + this.docBuffer.size() + " documents");
//Thread.dumpStack();
if (this.docBuffer.size() > 0) try {
this.connector.add(this.docBuffer.values());
} catch (final OutOfMemoryError e) {
// clear and try again...
clearCaches();
try {
this.connector.add(this.docBuffer.values());
} catch (final IOException ee) {
ConcurrentLog.logException(e);
}
} catch (final IOException e) {
ConcurrentLog.logException(e);
}
this.docBuffer.clear();
}
}
@Override
public int bufferSize() {
return this.docBuffer.size();
}
@Override
public void clearCaches() {
this.connector.clearCaches();
this.metadataCache.clear();
//this.missCache.clear();
}
private void updateCache(final String id, final LoadTimeURL md) {
if (id == null) return;
if (MemoryControl.shortStatus()) {
this.metadataCache.clear();
//this.missCache.clear();
}
this.metadataCache.put(id, md);
//this.missCache.delete(id);
}
public void ensureAliveProcessHandler() {
if (this.processHandler == null || !this.processHandler.isAlive()) {
this.processHandler = new CommitHandler();
this.processHandler.setName(this.getClass().getName() + "_ProcessHandler");
this.processHandler.start();
}
}
@Override
public Iterator<String> iterator() {
return this.connector.iterator();
}
@Override
public long getSize() {
return Math.max(this.metadataCache.size(), this.connector.getSize());
}
@Override
public void commit(boolean softCommit) {
ensureAliveProcessHandler();
commitDocBuffer();
this.connector.commit(softCommit);
}
@Override
public void optimize(int maxSegments) {
commitDocBuffer();
this.connector.optimize(maxSegments);
}
@Override
public int getSegmentCount() {
return this.connector.getSegmentCount();
}
@Override
public boolean isClosed() {
return this.connector == null || this.connector.isClosed();
}
@Override
public void close() {
ensureAliveProcessHandler();
this.commitProcessRunning = false;
try {this.processHandler.join();} catch (final InterruptedException e) {}
this.connector.close();
this.metadataCache.clear();
this.connector = null;
this.metadataCache = null;
}
@Override
public void clear() throws IOException {
this.docBuffer.clear();
this.connector.clear();
this.metadataCache.clear();
//this.missCache.clear();
}
@Override
public synchronized void deleteById(String id) throws IOException {
this.metadataCache.remove(id);
//this.missCache.add(id);
synchronized (this.docBuffer) {
this.docBuffer.remove(id);
}
this.connector.deleteById(id);
}
@Override
public synchronized void deleteByIds(Collection<String> ids) throws IOException {
for (String id: ids) {
this.metadataCache.remove(id);
//this.missCache.add(id);
}
synchronized (this.docBuffer) {
for (String id: ids) {
this.docBuffer.remove(id);
}
}
this.connector.deleteByIds(ids);
}
@Override
public void deleteByQuery(final String querystring) throws IOException {
commitDocBuffer();
try {
this.connector.deleteByQuery(querystring);
this.metadataCache.clear();
} catch (final IOException e) {
ConcurrentLog.severe("ConcurrentUpdateSolrConnector", e.getMessage(), e);
}
}
@Override
public LoadTimeURL getLoadTimeURL(String id) throws IOException {
//if (this.missCache.contains(id)) return null;
LoadTimeURL md = this.metadataCache.get(id);
if (md != null) {
//System.out.println("*** metadata cache hit; metadataCache.size() = " + metadataCache.size());
//Thread.dumpStack();
return md;
}
SolrInputDocument doc = this.docBuffer.get(id);
if (doc != null) {
//System.out.println("*** docBuffer cache hit; docBuffer.size() = " + docBuffer.size());
//Thread.dumpStack();
return AbstractSolrConnector.getLoadTimeURL(doc);
}
md = this.connector.getLoadTimeURL(id);
if (md == null) {/*this.missCache.add(id);*/ return null;}
updateCache(id, md);
return md;
}
@Override
public void add(SolrInputDocument solrdoc) throws IOException, SolrException {
String id = (String) solrdoc.getFieldValue(CollectionSchema.id.getSolrFieldName());
updateCache(id, AbstractSolrConnector.getLoadTimeURL(solrdoc));
ensureAliveProcessHandler();
if (this.processHandler.isAlive()) {
synchronized (this.docBuffer) {this.docBuffer.put(id, solrdoc);}
} else {
this.connector.add(solrdoc);
}
if (MemoryControl.shortStatus() || this.docBuffer.size() > this.updateCapacity) {
commitDocBuffer();
}
}
@Override
public void add(Collection<SolrInputDocument> solrdocs) throws IOException, SolrException {
ensureAliveProcessHandler();
synchronized (this.docBuffer) {
for (SolrInputDocument solrdoc: solrdocs) {
String id = (String) solrdoc.getFieldValue(CollectionSchema.id.getSolrFieldName());
updateCache(id, AbstractSolrConnector.getLoadTimeURL(solrdoc));
if (this.processHandler.isAlive()) {
this.docBuffer.put(id, solrdoc);
} else {
this.connector.add(solrdoc);
}
}
}
if (MemoryControl.shortStatus() || this.docBuffer.size() > this.updateCapacity) {
commitDocBuffer();
}
}
@Override
public SolrDocument getDocumentById(final String id, String... fields) throws IOException {
assert id.length() == Word.commonHashLength : "wrong id: " + id;
//if (this.missCache.contains(id)) return null;
SolrInputDocument idoc = this.docBuffer.get(id);
if (idoc != null) {
//System.out.println("*** docBuffer cache hit; docBuffer.size() = " + docBuffer.size());
//Thread.dumpStack();
return ClientUtils.toSolrDocument(idoc);
}
SolrDocument solrdoc = this.connector.getDocumentById(id, AbstractSolrConnector.ensureEssentialFieldsIncluded(fields));
if (solrdoc == null) {
//this.missCache.add(id);
this.metadataCache.remove(id);
} else {
updateCache(id, AbstractSolrConnector.getLoadTimeURL(solrdoc));
}
return solrdoc;
}
@Override
public QueryResponse getResponseByParams(ModifiableSolrParams query) throws IOException, SolrException {
commitDocBuffer();
return this.connector.getResponseByParams(query);
}
@Override
public SolrDocumentList getDocumentListByParams(ModifiableSolrParams params) throws IOException, SolrException {
commitDocBuffer();
SolrDocumentList sdl = this.connector.getDocumentListByParams(params);
for (SolrDocument doc: sdl) {
String id = (String) doc.getFieldValue(CollectionSchema.id.getSolrFieldName());
updateCache(id, AbstractSolrConnector.getLoadTimeURL(doc));
}
return sdl;
}
@Override
public SolrDocumentList getDocumentListByQuery(String querystring, String sort, int offset, int count, String... fields) throws IOException, SolrException {
commitDocBuffer();
if (offset == 0 && count == 1 && querystring.startsWith("id:") &&
((querystring.length() == 17 && querystring.charAt(3) == '"' && querystring.charAt(16) == '"') ||
querystring.length() == 15)) {
final SolrDocumentList list = new SolrDocumentList();
SolrDocument doc = getDocumentById(querystring.charAt(3) == '"' ? querystring.substring(4, querystring.length() - 1) : querystring.substring(3), fields);
list.add(doc);
return list;
}
SolrDocumentList sdl = this.connector.getDocumentListByQuery(querystring, sort, offset, count, AbstractSolrConnector.ensureEssentialFieldsIncluded(fields));
return sdl;
}
@Override
public long getCountByQuery(String querystring) throws IOException {
commitDocBuffer();
return this.connector.getCountByQuery(querystring);
}
@Override
public LinkedHashMap<String, ReversibleScoreMap<String>> getFacets(String query, int maxresults, String... fields) throws IOException {
commitDocBuffer();
return this.connector.getFacets(query, maxresults, fields);
}
@Override
public BlockingQueue<SolrDocument> concurrentDocumentsByQuery(String querystring, String sort, int offset, int maxcount, long maxtime, int buffersize, final int concurrency, final boolean prefetchIDs, String... fields) {
commitDocBuffer();
return this.connector.concurrentDocumentsByQuery(querystring, sort, offset, maxcount, maxtime, buffersize, concurrency, prefetchIDs, fields);
}
@Override
public BlockingQueue<SolrDocument> concurrentDocumentsByQueries(
List<String> querystrings, String sort, int offset, int maxcount,
long maxtime, int buffersize, int concurrency, boolean prefetchIDs,
String... fields) {
commitDocBuffer();
return this.connector.concurrentDocumentsByQueries(querystrings, sort, offset, maxcount, maxtime, buffersize, concurrency, prefetchIDs, fields);
}
@Override
public BlockingQueue<String> concurrentIDsByQuery(String querystring, String sort, int offset, int maxcount, long maxtime, int buffersize, final int concurrency) {
commitDocBuffer();
return this.connector.concurrentIDsByQuery(querystring, sort, offset, maxcount, maxtime, buffersize, concurrency);
}
@Override
public BlockingQueue<String> concurrentIDsByQueries(
List<String> querystrings, String sort, int offset, int maxcount,
long maxtime, int buffersize, int concurrency) {
commitDocBuffer();
return this.connector.concurrentIDsByQueries(querystrings, sort, offset, maxcount, maxtime, buffersize, concurrency);
}
@Override
public void update(final SolrInputDocument solrdoc) throws IOException, SolrException {
commitDocBuffer();
this.connector.update(solrdoc);
}
@Override
public void update(final Collection<SolrInputDocument> solrdoc) throws IOException, SolrException {
commitDocBuffer();
this.connector.update(solrdoc);
}
}

@ -241,7 +241,7 @@ public class EmbeddedSolrConnector extends SolrServerConnector implements SolrCo
SolrDocumentList sdl = new SolrDocumentList();
NamedList<?> nl = rsp.getValues();
ResultContext resultContext = (ResultContext) nl.get("response");
DocList response = resultContext == null ? new DocSlice(0, 0, new int[0], new float[0], 0, 0.0f) : resultContext.docs;
DocList response = resultContext == null ? new DocSlice(0, 0, new int[0], new float[0], 0, 0.0f) : resultContext.getDocList();
sdl.setNumFound(response == null ? 0 : response.matches());
sdl.setStart(response == null ? 0 : response.offset());
String originalName = Thread.currentThread().getName();
@ -391,7 +391,7 @@ public class EmbeddedSolrConnector extends SolrServerConnector implements SolrCo
NamedList<?> nl = rsp.getValues();
ResultContext resultContext = (ResultContext) nl.get("response");
if (resultContext == null) log.warn("DocListSearcher: no response for query '" + querystring + "'");
this.response = resultContext == null ? new DocSlice(0, 0, new int[0], new float[0], 0, 0.0f) : resultContext.docs;
this.response = resultContext == null ? new DocSlice(0, 0, new int[0], new float[0], 0, 0.0f) : resultContext.getDocList();
}
@Override
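This hunk shows the one API change repeated throughout the commit's connectors and response writers: Solr 6 removed ResultContext's public docs field in favour of an accessor. A sketch of the migrated call site, keeping the empty-DocSlice fallback from the diff:

import org.apache.solr.response.ResultContext;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocSlice;

final class ResultContextMigration {
    // Solr 5.x: ctx.docs (public field). Solr 6.x: ctx.getDocList() on the
    // now-abstract ResultContext. The zero-length DocSlice stands in when
    // the response carried no result context at all.
    static DocList docsOf(ResultContext ctx) {
        return ctx == null
                ? new DocSlice(0, 0, new int[0], new float[0], 0, 0.0f)
                : ctx.getDocList();
    }
}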

@ -30,7 +30,6 @@ import net.yacy.cora.federate.solr.instance.ServerShard;
import net.yacy.cora.util.ConcurrentLog;
import net.yacy.search.schema.CollectionSchema;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
@ -51,12 +50,7 @@ import org.apache.solr.common.params.CommonParams;
public abstract class SolrServerConnector extends AbstractSolrConnector implements SolrConnector {
protected final static ConcurrentLog log = new ConcurrentLog(SolrServerConnector.class.getName());
public final static NumericTokenStream classLoaderSynchro = new NumericTokenStream();
// pre-instantiate this object to prevent sun.misc.Launcher$AppClassLoader deadlocks
// this is a very nasty problem; solr instantiates objects dynamically which can cause deadlocks
static {
assert classLoaderSynchro != null;
}
protected SolrClient server;
protected SolrServerConnector() {
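The deleted static block pre-instantiated a NumericTokenStream so that its whole class hierarchy loaded eagerly from one thread, guarding against AppClassLoader deadlocks when Solr's dynamic class loading raced in from several threads. The import is gone because Lucene 6 renamed the class (LegacyNumericTokenStream), and on the Java 8 baseline the guard was presumably judged unnecessary. A sketch of the general eager-initialization idiom the block implemented (class names here are illustrative, not YaCy's):

// Touching the sentinel once during single-threaded startup runs <clinit>
// for the whole dependency chain, so a later concurrent first-use cannot
// deadlock a non-parallel-capable classloader.
final class EagerClassLoad {
    static final StringBuilder SENTINEL = new StringBuilder();
    static { assert SENTINEL != null; }
    private EagerClassLoad() {}
}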

@ -177,7 +177,7 @@ public class EmbeddedInstance implements SolrInstance {
source = new File(solr_config, "solrcore.properties");
}
}
// solr alwasy reads the solrcore.properties file if exists in core/conf directory
// solr always reads the solrcore.properties file if exists in core/conf directory
target = new File(conf, "solrcore.properties");
if (source.exists()) {
@ -206,7 +206,7 @@ public class EmbeddedInstance implements SolrInstance {
@Override
public Collection<String> getCoreNames() {
return this.coreContainer.getCoreNames();
return this.coreContainer.getAllCoreNames();
}
@Override
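Solr 6's CoreContainer distinguishes loaded cores from registered ones: getCoreNames() now reports only cores currently loaded, while getAllCoreNames() also includes lazily loaded or transient cores, which is the closer match for an embedded instance that should enumerate everything it was configured with. A sketch under that assumption:

import java.util.Collection;
import org.apache.solr.core.CoreContainer;

final class CoreNames {
    // getAllCoreNames() covers loaded plus registered-but-unloaded cores;
    // getCoreNames() alone would silently drop cores not yet loaded.
    static Collection<String> all(CoreContainer cc) {
        return cc.getAllCoreNames();
    }
}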

@ -27,12 +27,10 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import net.yacy.cora.federate.solr.connector.ConcurrentUpdateSolrConnector;
import net.yacy.cora.federate.solr.connector.EmbeddedSolrConnector;
import net.yacy.cora.federate.solr.connector.MirrorSolrConnector;
import net.yacy.cora.federate.solr.connector.RemoteSolrConnector;
import net.yacy.cora.federate.solr.connector.SolrConnector;
import net.yacy.kelondro.util.MemoryControl;
import net.yacy.search.Switchboard;
import net.yacy.search.SwitchboardConstants;
@ -181,9 +179,7 @@ public class InstanceMirror {
if (msc != null) return msc;
EmbeddedSolrConnector esc = getEmbeddedConnector(corename);
RemoteSolrConnector rsc = getRemoteConnector(corename);
int cacheSize = (int) (MemoryControl.available() / 30000); // will return about 10000 for standard ram size
msc = new ConcurrentUpdateSolrConnector(new MirrorSolrConnector(esc, rsc), RemoteInstance.queueSizeByMemory(), cacheSize, Runtime.getRuntime().availableProcessors());
//msc = new MirrorSolrConnector(esc, rsc);
msc = new MirrorSolrConnector(esc, rsc);
this.mirrorConnectorCache.put(corename, msc);
return msc;
}

@ -431,7 +431,7 @@ public class ServerMirror extends SolrClient {
}
@Override
public void shutdown() {
public void close() throws IOException {
if (this.solr0 != null)
try {
this.solr0.close();

@ -487,7 +487,7 @@ public class ServerShard extends SolrClient {
}
@Override
public void shutdown() {
public void close() throws IOException {
for (SolrClient s: this.shards) {
try {
s.close();
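SolrClient has implemented Closeable since 5.0, and the deprecated shutdown() was removed in 6.0, so both client wrappers (ServerMirror and ServerShard) now override close() throws IOException instead. A sketch of the shard variant, assuming a list of delegate clients, that keeps closing the rest when one delegate fails:

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import org.apache.solr.client.solrj.SolrClient;

final class ShardCloser implements Closeable {
    private final List<SolrClient> shards;
    ShardCloser(List<SolrClient> shards) { this.shards = shards; }

    // Closing each delegate individually keeps one failing shard from
    // leaking the connections of the others; the first failure is rethrown
    // after all delegates have been attempted.
    @Override
    public void close() throws IOException {
        IOException first = null;
        for (SolrClient s : shards) {
            try {
                s.close();
            } catch (IOException e) {
                if (first == null) first = e; // remember, keep closing the rest
            }
        }
        if (first != null) throw first;
    }
}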

@ -50,7 +50,6 @@ import org.apache.solr.schema.TextField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.DateFormatUtil;
public class EnhancedXMLResponseWriter implements QueryResponseWriter {
@ -80,7 +79,7 @@ public class EnhancedXMLResponseWriter implements QueryResponseWriter {
assert values.get("response") != null;
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) values.get("highlighting");
writeProps(writer, "responseHeader", responseHeader); // this.writeVal("responseHeader", responseHeader);
@ -268,7 +267,7 @@ public class EnhancedXMLResponseWriter implements QueryResponseWriter {
} else if (typeName.equals(SolrType.num_long.printName())) {
writeTag(writer, "long", name, value, true);
} else if (typeName.equals(SolrType.date.printName())) {
writeTag(writer, "date", name, DateFormatUtil.formatExternal(new Date(Long.parseLong(value))), true);
writeTag(writer, "date", name, new Date(Long.parseLong(value)).toInstant().toString(), true);
} else if (typeName.equals(SolrType.num_float.printName())) {
writeTag(writer, "float", name, value, true);
} else if (typeName.equals(SolrType.num_double.printName())) {
@ -286,7 +285,7 @@ public class EnhancedXMLResponseWriter implements QueryResponseWriter {
} else if (value instanceof Long) {
writeTag(writer, "long", name, ((Long) value).toString(), true);
} else if (value instanceof Date) {
writeTag(writer, "date", name, DateFormatUtil.formatExternal((Date) value), true);
writeTag(writer, "date", name, ((Date) value).toInstant().toString(), true);
} else if (value instanceof Float) {
writeTag(writer, "float", name, ((Float) value).toString(), true);
} else if (value instanceof Double) {
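org.apache.solr.util.DateFormatUtil no longer exists in Solr 6, which standardised its date handling on java.time. Date.toInstant().toString() emits ISO-8601 in UTC (e.g. 2017-07-04T12:30:00Z, fractional seconds only when non-zero), matching the external form Solr date fields use, so the swap is mechanical across the writers, query builders, and tests below. A small self-contained check:

import java.util.Date;

final class SolrDateFormat {
    // Instant.toString() yields the ISO-8601 UTC form that the removed
    // DateFormatUtil.formatExternal produced for Solr date fields.
    static String external(Date d) {
        return d.toInstant().toString();
    }

    public static void main(String[] args) {
        System.out.println(external(new Date(0L))); // 1970-01-01T00:00:00Z
    }
}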

@ -42,7 +42,6 @@ import org.apache.solr.schema.TextField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.DateFormatUtil;
import org.json.simple.JSONArray;
import net.yacy.cora.federate.solr.SolrType;
@ -70,7 +69,7 @@ public class FlatJSONResponseWriter implements QueryResponseWriter {
@Override
public void write(final Writer writer, final SolrQueryRequest request, final SolrQueryResponse rsp) throws IOException {
NamedList<?> values = rsp.getValues();
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
writeDocs(writer, request, response);
}
@ -144,7 +143,7 @@ public class FlatJSONResponseWriter implements QueryResponseWriter {
} else if (typeName.equals(SolrType.num_long.printName())) {
json.put(name, Long.parseLong(value));
} else if (typeName.equals(SolrType.date.printName())) {
json.put(name, DateFormatUtil.formatExternal(new Date(Long.parseLong(value))));
json.put(name, new Date(Long.parseLong(value)).toInstant().toString());
} else if (typeName.equals(SolrType.num_float.printName())) {
json.put(name, Double.parseDouble(value));
} else if (typeName.equals(SolrType.num_double.printName())) {

@ -148,7 +148,7 @@ public class GSAResponseWriter implements QueryResponseWriter {
long start = System.currentTimeMillis();
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocList response = ((ResultContext) rsp.getValues().get("response")).docs;
DocList response = ((ResultContext) rsp.getValues().get("response")).getDocList();
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) rsp.getValues().get("highlighting");
Map<String, LinkedHashSet<String>> snippets = OpensearchResponseWriter.highlighting(highlighting);

@ -109,7 +109,7 @@ public class GrepHTMLResponseWriter implements QueryResponseWriter {
paramsList.remove("wt");
String xmlquery = dqp.matcher("select?" + SolrParams.toSolrParams(paramsList).toString()).replaceAll("%22");
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
final int sz = response.size();
if (sz > 0) {
SolrIndexSearcher searcher = request.getSearcher();

@ -51,7 +51,6 @@ import org.apache.solr.schema.TextField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.util.DateFormatUtil;
public class HTMLResponseWriter implements QueryResponseWriter {
@ -157,7 +156,7 @@ public class HTMLResponseWriter implements QueryResponseWriter {
String xmlquery = dqp.matcher("../solr/select?" + SolrParams.toSolrParams(paramsList).toString() + "&core=" + coreName).replaceAll("%22");
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
final int sz = response.size();
if (sz > 0) {
SolrIndexSearcher searcher = request.getSearcher();
@ -283,7 +282,7 @@ public class HTMLResponseWriter implements QueryResponseWriter {
if (typeName.equals(SolrType.bool.printName())) {
return "F".equals(value) ? "false" : "true";
} else if (typeName.equals(SolrType.date.printName())) {
return DateFormatUtil.formatExternal(new Date(Long.parseLong(value)));
return new Date(Long.parseLong(value)).toInstant().toString();
}
return value;
}

@ -110,7 +110,7 @@ public class OpensearchResponseWriter implements QueryResponseWriter {
assert values.get("response") != null;
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> facetCounts = (SimpleOrderedMap<Object>) values.get("facet_counts");
@SuppressWarnings("unchecked")

@ -53,7 +53,7 @@ public class SnapshotImagesReponseWriter implements QueryResponseWriter {
NamedList<Object> paramsList = request.getOriginalParams().toNamedList();
paramsList.remove("wt");
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
final int sz = response.size();
if (sz > 0) {
SolrIndexSearcher searcher = request.getSearcher();

@ -94,7 +94,7 @@ public class YJsonResponseWriter implements QueryResponseWriter {
assert values.get("response") != null;
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocList response = ((ResultContext) values.get("response")).docs;
DocList response = ((ResultContext) values.get("response")).getDocList();
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> facetCounts = (SimpleOrderedMap<Object>) values.get("facet_counts");
@SuppressWarnings("unchecked")

@ -257,7 +257,7 @@ public class GSAsearchServlet extends HttpServlet {
Object rv = response.getValues().get("response");
int matches = 0;
if (rv != null && rv instanceof ResultContext) {
matches = ((ResultContext) rv).docs.matches();
matches = ((ResultContext) rv).getDocList().matches();
} else if (rv != null && rv instanceof SolrDocumentList) {
matches = (int) ((SolrDocumentList) rv).getNumFound();
}

@ -263,7 +263,7 @@ public class SolrSelectServlet extends HttpServlet {
NamedList<?> values = rsp.getValues();
DocList r = ((ResultContext) values.get("response")).docs;
DocList r = ((ResultContext) values.get("response")).getDocList();
int numFound = r.matches();
AccessTracker.addToDump(querystring, numFound, new Date(), "sq");

@ -83,13 +83,12 @@ import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.core.SolrInfoMBean;
import org.apache.solr.util.DateFormatUtil;
import org.apache.lucene.util.Version;
public final class Fulltext {
private static final String SOLR_PATH = "solr_5_5"; // the number should be identical to the number in the property luceneMatchVersion in solrconfig.xml
private static final String SOLR_OLD_PATH[] = new String[]{"solr_36", "solr_40", "solr_44", "solr_45", "solr_46", "solr_47", "solr_4_9", "solr_4_10", "solr_5_2"};
private static final String SOLR_PATH = "solr_6_6"; // the number should be identical to the number in the property luceneMatchVersion in solrconfig.xml
private static final String SOLR_OLD_PATH[] = new String[]{"solr_36", "solr_40", "solr_44", "solr_45", "solr_46", "solr_47", "solr_4_9", "solr_4_10", "solr_5_2", "solr_5_5"};
// class objects
private final File segmentPath;
@ -666,8 +665,8 @@ public final class Fulltext {
long now = System.currentTimeMillis();
if (maxseconds > 0) {
long from = now - maxseconds * 1000L;
String nowstr = DateFormatUtil.formatExternal(new Date(now));
String fromstr = DateFormatUtil.formatExternal(new Date(from));
String nowstr = new Date(now).toInstant().toString();
String fromstr = new Date(from).toInstant().toString();
String dateq = CollectionSchema.load_date_dt.getSolrFieldName() + ":[" + fromstr + " TO " + nowstr + "]";
query = query == null || AbstractSolrConnector.CATCHALL_QUERY.equals(query) ? dateq : query + " AND " + dateq;
} else {
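With the Instant-based formatting above, the load-date window stays a plain inclusive range clause over the load date. A sketch of the clause construction, field name mirrored from CollectionSchema.load_date_dt in the diff:

import java.util.Date;

final class LoadDateWindow {
    // Builds the [from TO now] window shown above: ISO-8601 timestamps,
    // inclusive on both ends, over the last maxseconds seconds.
    static String clause(long maxseconds) {
        long now = System.currentTimeMillis();
        String nowstr = new Date(now).toInstant().toString();
        String fromstr = new Date(now - maxseconds * 1000L).toInstant().toString();
        return "load_date_dt:[" + fromstr + " TO " + nowstr + "]";
    }
}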

@ -26,7 +26,6 @@ import java.util.Date;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.MultiMapSolrParams;
import org.apache.solr.util.DateFormatUtil;
import net.yacy.cora.document.id.DigestURL;
import net.yacy.cora.util.CommonPattern;
@ -382,7 +381,7 @@ public class QueryModifier {
Date onDate = DateDetection.parseLine(onDescription, timezoneOffset);
StringBuilder filterQuery = new StringBuilder(20);
if (onDate != null) {
String dstr = DateFormatUtil.formatExternal(onDate);
String dstr = onDate.toInstant().toString();
filterQuery.append(CollectionSchema.dates_in_content_dts.getSolrFieldName()).append(":[").append(dstr).append(" TO ").append(dstr).append(']');
}
return filterQuery.toString();
@ -393,8 +392,8 @@ public class QueryModifier {
Date toDate = to == null || to.equals("*") ? null : DateDetection.parseLine(to, timezoneOffset);
StringBuilder filterQuery = new StringBuilder(20);
if (fromDate != null && toDate != null) {
String dstrFrom = DateFormatUtil.formatExternal(fromDate);
String dstrTo = DateFormatUtil.formatExternal(toDate);
String dstrFrom = fromDate.toInstant().toString();
String dstrTo = toDate.toInstant().toString();
filterQuery.append(CollectionSchema.dates_in_content_dts.getSolrFieldName()).append(":[").append(dstrFrom).append(" TO ").append(dstrTo).append(']');
}
return filterQuery.toString();

@ -76,7 +76,6 @@ import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrQuery.SortClause;
import org.apache.solr.common.params.DisMaxParams;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.util.DateFormatUtil;
public final class QueryParams {
@ -468,8 +467,8 @@ public final class QueryParams {
for (String field: this.facetfields) params.addFacetField("{!ex=" + field + "}" + field); // params.addFacetField("{!ex=" + field + "}" + field);
if (this.facetfields.contains(CollectionSchema.dates_in_content_dts.name())) {
params.setParam(FacetParams.FACET_RANGE, CollectionSchema.dates_in_content_dts.name());
String start = DateFormatUtil.formatExternal(new Date(System.currentTimeMillis() - 1000L * 60L * 60L * 24L * 3));
String end = DateFormatUtil.formatExternal(new Date(System.currentTimeMillis() + 1000L * 60L * 60L * 24L * 3));
String start = new Date(System.currentTimeMillis() - 1000L * 60L * 60L * 24L * 3).toInstant().toString();
String end = new Date(System.currentTimeMillis() + 1000L * 60L * 60L * 24L * 3).toInstant().toString();
params.setParam("f." + CollectionSchema.dates_in_content_dts.getSolrFieldName() + ".facet.range.start", start);
params.setParam("f." + CollectionSchema.dates_in_content_dts.getSolrFieldName() + ".facet.range.end", end);
params.setParam("f." + CollectionSchema.dates_in_content_dts.getSolrFieldName() + ".facet.range.gap", "+1DAY");

@ -5,7 +5,6 @@ import java.util.Date;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.TimeZone;
import org.apache.solr.util.DateFormatUtil;
import org.junit.Test;
import static org.junit.Assert.*;
@ -35,8 +34,8 @@ public class DateDetectionTest {
Date d = DateDetection.parseLine(text, 0);
// this formatter is used to create Solr search queries, use it to compare equality
String cs = DateFormatUtil.formatExternal(cal.getTime());
String ds = DateFormatUtil.formatExternal(d);
String cs = cal.getTime().toInstant().toString();
String ds = d.toInstant().toString();
assertEquals(text, cs, ds);
}
@ -55,8 +54,8 @@ public class DateDetectionTest {
Date d = DateDetection.parseLine(text, 0);
// this formatter is used to create Solr search queries, use it to compare equality
String cs = DateFormatUtil.formatExternal(cal.getTime());
String ds = DateFormatUtil.formatExternal(d);
String cs = cal.getTime().toInstant().toString();
String ds = d.toInstant().toString();
assertEquals(text, cs, ds);
}
@ -79,8 +78,8 @@ public class DateDetectionTest {
Date d = DateDetection.parseLine(text, 0);
// this formatter is used to create Solr search queries, use it to compare equality
String cs = DateFormatUtil.formatExternal(cal.getTime());
String ds = DateFormatUtil.formatExternal(d);
String cs = cal.getTime().toInstant().toString();
String ds = d.toInstant().toString();
assertEquals(text, cs, ds);
}
