migration to solr 4.0.0

pull/1/head
Michael Peter Christen 13 years ago
parent b764de424a
commit e2c4c3c7d3

@ -49,22 +49,27 @@
<classpathentry kind="lib" path="lib/fontbox-1.7.0.jar"/>
<classpathentry kind="lib" path="lib/pdfbox-1.7.0.jar"/>
<classpathentry kind="lib" path="lib/jempbox-1.7.0.jar"/>
<classpathentry kind="lib" path="lib/apache-solr-core-3.6.1.jar" sourcepath="/Users/admin/Development/sourcecode/solr-3.6.1/core/src/java"/>
<classpathentry kind="lib" path="lib/apache-solr-solrj-3.6.1.jar" sourcepath="/Users/admin/Development/sourcecode/solr-3.6.1/core/src/java"/>
<classpathentry kind="lib" path="lib/lucene-core-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-grouping-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-highlighter-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-phonetic-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-spatial-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-spellchecker-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/lucene-memory-3.6.1.jar"/>
<classpathentry kind="lib" path="lib/jaudiotagger-2.0.4-20111207.115108-15.jar"/>
<classpathentry kind="lib" path="lib/commons-codec-1.7.jar"/>
<classpathentry kind="lib" path="lib/jcl-over-slf4j-1.6.4.jar"/>
<classpathentry kind="lib" path="lib/log4j-over-slf4j-1.6.4.jar"/>
<classpathentry kind="lib" path="lib/slf4j-api-1.6.4.jar"/>
<classpathentry kind="lib" path="lib/slf4j-jdk14-1.6.4.jar"/>
<classpathentry kind="lib" path="lib/apache-solr-core-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/apache-solr-solrj-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-common-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-analyzers-phonetic-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-core-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-grouping-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-highlighter-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-memory-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-misc-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-queries-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-queryparser-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-spatial-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/lucene-suggest-4.0.0.jar"/>
<classpathentry kind="lib" path="lib/zookeeper-3.3.6.jar"/>
<classpathentry kind="lib" path="lib/spatial4j-0.3.jar"/>
<classpathentry kind="con" path="org.eclipse.jdt.junit.JUNIT_CONTAINER/4"/>
<classpathentry kind="lib" path="lib/icu4j-core.jar"/>
<classpathentry kind="lib" path="lib/htmllexer.jar"/>

@ -40,8 +40,8 @@
<string>$JAVAROOT/lib/J7Zip-modified.jar</string>
<string>$JAVAROOT/lib/activation.jar</string>
<string>$JAVAROOT/lib/apache-mime4j-0.6.jar</string>
<string>$JAVAROOT/lib/apache-solr-core-3.6.1.jar</string>
<string>$JAVAROOT/lib/apache-solr-solrj-3.6.1.jar</string>
<string>$JAVAROOT/lib/apache-solr-core-4.0.0.jar</string>
<string>$JAVAROOT/lib/apache-solr-solrj-4.0.0.jar</string>
<string>$JAVAROOT/lib/arq-2.8.7.jar</string>
<string>$JAVAROOT/lib/bcmail-jdk15-145.jar</string>
<string>$JAVAROOT/lib/bcprov-jdk15-145.jar</string>
@ -63,6 +63,7 @@
<string>$JAVAROOT/lib/icu4j-core.jar</string>
<string>$JAVAROOT/lib/iri-0.8.jar</string>
<string>$JAVAROOT/lib/jakarta-oro-2.0.8.jar</string>
<string>$JAVAROOT/lib/jaudiotagger-2.0.4-20111207.115108-15.jar</string>
<string>$JAVAROOT/lib/jcifs-1.3.15.jar</string>
<string>$JAVAROOT/lib/jcl-over-slf4j-1.6.4.jar</string>
<string>$JAVAROOT/lib/jempbox-1.7.0.jar</string>
@ -73,14 +74,17 @@
<string>$JAVAROOT/lib/json-simple-1.1.jar</string>
<string>$JAVAROOT/lib/log4j-1.2.16.jar</string>
<string>$JAVAROOT/lib/log4j-over-slf4j-1.6.4.jar</string>
<string>$JAVAROOT/lib/lucene-analyzers-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-grouping-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-core-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-highlighter-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-phonetic-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-spatial-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-spellchecker-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-memory-3.6.1.jar</string>
<string>$JAVAROOT/lib/lucene-analyzers-common-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-analyzers-phonetic-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-core-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-grouping-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-highlighter-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-memory-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-misc-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-queries-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-queryparser-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-spatial-4.0.0.jar</string>
<string>$JAVAROOT/lib/lucene-suggest-4.0.0.jar</string>
<string>$JAVAROOT/lib/metadata-extractor-2.4.0-beta-1.jar</string>
<string>$JAVAROOT/lib/mysql-connector-java-5.1.12-bin.jar</string>
<string>$JAVAROOT/lib/pdfbox-1.7.0.jar</string>
@ -90,11 +94,13 @@
<string>$JAVAROOT/lib/servlet-api-2.5-20081211.jar</string>
<string>$JAVAROOT/lib/slf4j-api-1.6.4.jar</string>
<string>$JAVAROOT/lib/slf4j-jdk14-1.6.4.jar</string>
<string>$JAVAROOT/lib/spatial4j-0.3.jar</string>
<string>$JAVAROOT/lib/webcat-0.1-swf.jar</string>
<string>$JAVAROOT/lib/wstx-asl-3.2.7.jar</string>
<string>$JAVAROOT/lib/xercesImpl.jar</string>
<string>$JAVAROOT/lib/xml-apis.jar</string>
<string>$JAVAROOT/lib/yacycore.jar</string>
<string>$JAVAROOT/lib/zookeeper-3.3.6.jar</string>
</array>
<key>Properties</key>
<dict>

@ -159,8 +159,8 @@
<pathelement location="${lib}/J7Zip-modified.jar" />
<pathelement location="${lib}/activation.jar" />
<pathelement location="${lib}/apache-mime4j-0.6.jar" />
<pathelement location="${lib}/apache-solr-core-3.6.1.jar" />
<pathelement location="${lib}/apache-solr-solrj-3.6.1.jar" />
<pathelement location="${lib}/apache-solr-core-4.0.0.jar" />
<pathelement location="${lib}/apache-solr-solrj-4.0.0.jar" />
<pathelement location="${lib}/arq-2.8.7.jar" />
<pathelement location="${lib}/bcmail-jdk15-145.jar" />
<pathelement location="${lib}/bcprov-jdk15-145.jar" />
@ -182,6 +182,7 @@
<pathelement location="${lib}/icu4j-core.jar" />
<pathelement location="${lib}/iri-0.8.jar" />
<pathelement location="${lib}/jakarta-oro-2.0.8.jar" />
<pathelement location="${lib}/jaudiotagger-2.0.4-20111207.115108-15.jar" />
<pathelement location="${lib}/jcifs-1.3.15.jar" />
<pathelement location="${lib}/jcl-over-slf4j-1.6.4.jar" />
<pathelement location="${lib}/jempbox-1.7.0" />
@ -193,14 +194,17 @@
<pathelement location="${lib}/jsoup-1.6.3.jar" />
<pathelement location="${lib}/log4j-1.2.16.jar" />
<pathelement location="${lib}/log4j-over-slf4j-1.6.4.jar" />
<pathelement location="${lib}/lucene-analyzers-3.6.1.jar" />
<pathelement location="${lib}/lucene-grouping-3.6.1.jar" />
<pathelement location="${lib}/lucene-core-3.6.1.jar" />
<pathelement location="${lib}/lucene-highlighter-3.6.1.jar" />
<pathelement location="${lib}/lucene-phonetic-3.6.1.jar" />
<pathelement location="${lib}/lucene-spatial-3.6.1.jar" />
<pathelement location="${lib}/lucene-spellchecker-3.6.1.jar" />
<pathelement location="${lib}/lucene-memory-3.6.1.jar" />
<pathelement location="${lib}/lucene-analyzers-common-4.0.0.jar" />
<pathelement location="${lib}/lucene-analyzers-phonetic-4.0.0.jar" />
<pathelement location="${lib}/lucene-core-4.0.0.jar" />
<pathelement location="${lib}/lucene-grouping-4.0.0.jar" />
<pathelement location="${lib}/lucene-highlighter-4.0.0.jar" />
<pathelement location="${lib}/lucene-memory-4.0.0.jar" />
<pathelement location="${lib}/lucene-misc-4.0.0.jar" />
<pathelement location="${lib}/lucene-queries-4.0.0.jar" />
<pathelement location="${lib}/lucene-queryparser-4.0.0.jar" />
<pathelement location="${lib}/lucene-spatial-4.0.0.jar" />
<pathelement location="${lib}/lucene-suggest-4.0.0.jar" />
<pathelement location="${lib}/metadata-extractor-2.4.0-beta-1.jar" />
<pathelement location="${lib}/mysql-connector-java-5.1.12-bin.jar" />
<pathelement location="${lib}/pdfbox-1.7.0.jar" />
@ -210,11 +214,12 @@
<pathelement location="${lib}/servlet-api-2.5-20081211.jar" />
<pathelement location="${lib}/slf4j-api-1.6.4.jar" />
<pathelement location="${lib}/slf4j-jdk14-1.6.4.jar" />
<pathelement location="${lib}/spatial4j-0.3.jar" />
<pathelement location="${lib}/webcat-0.1-swf.jar" />
<pathelement location="${lib}/wstx-asl-3.2.7.jar" />
<pathelement location="${lib}/xercesImpl.jar" />
<pathelement location="${lib}/xml-apis.jar" />
<pathelement location="${lib}/jaudiotagger-2.0.4-20111207.115108-15.jar" />
<pathelement location="${lib}/zookeeper-3.3.6.jar" />
</path>
<target name="compile-core" depends="init" description="compile YaCy core">

@ -14,12 +14,12 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
-->
<!--
For more details about configurations options that may appear in
this file, see http://wiki.apache.org/solr/SolrConfigXml.
-->
-->
<config>
<!-- In all configuration below, a prefix of "solr." for class names
is an alias that causes solr to search appropriate packages,
@ -29,27 +29,15 @@
have your own custom plugins.
-->
<!-- Set this to 'false' if you want solr to continue working after
it has encountered an severe configuration error. In a
production environment, you may want solr to keep working even
if one handler is mis-configured.
You may also set this to false using by setting the system
property:
-Dsolr.abortOnConfigurationError=false
-->
<abortOnConfigurationError>${solr.abortOnConfigurationError:true}</abortOnConfigurationError>
<!-- Controls what version of Lucene various components of Solr
adhere to. Generally, you want to use the latest version to
get all bug fixes and improvements. It is highly recommended
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
<luceneMatchVersion>LUCENE_36</luceneMatchVersion>
<luceneMatchVersion>LUCENE_40</luceneMatchVersion>
<!-- lib directives can be used to instruct Solr to load an Jars
<!-- <lib/> directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in
your solrconfig.xml or schema.xml (ie: Analyzers, Request
Handlers, etc...).
@ -57,6 +45,12 @@
All directories and paths are resolved relative to the
instanceDir.
Please note that <lib/> directives are processed in the order
that they appear in your solrconfig.xml file, and are "stacked"
on top of each other when building a ClassLoader - so if you have
plugin jars with dependencies on other jars, the "lower level"
dependency jars should be loaded first.
If a "./lib" directory exists in your instanceDir, all files
found in it are included as if you had used the following
syntax...
@ -67,29 +61,25 @@
<!-- A 'dir' option by itself adds any files found in the directory
to the classpath, this is useful for including all jars in a
directory.
-->
<!--
<lib dir="../add-everything-found-in-this-dir-to-the-classpath" />
-->
<!-- When a 'regex' is specified in addition to a 'dir', only the
When a 'regex' is specified in addition to a 'dir', only the
files in that directory which completely match the regex
(anchored on both ends) will be included.
-->
<lib dir="../../dist/" regex="apache-solr-cell-\d.*\.jar" />
<lib dir="../../contrib/extraction/lib" regex=".*\.jar" />
<lib dir="../../dist/" regex="apache-solr-clustering-\d.*\.jar" />
<lib dir="../../contrib/clustering/lib/" regex=".*\.jar" />
The examples below can be used to load some solr-contribs along
with their external dependencies.
-->
<lib dir="../../../contrib/extraction/lib" regex=".*\.jar" />
<lib dir="../../../dist/" regex="apache-solr-cell-\d.*\.jar" />
<lib dir="../../dist/" regex="apache-solr-dataimporthandler-\d.*\.jar" />
<lib dir="../../contrib/dataimporthandler/lib/" regex=".*\.jar" />
<lib dir="../../../contrib/clustering/lib/" regex=".*\.jar" />
<lib dir="../../../dist/" regex="apache-solr-clustering-\d.*\.jar" />
<lib dir="../../dist/" regex="apache-solr-langid-\d.*\.jar" />
<lib dir="../../contrib/langid/lib/" regex=".*\.jar" />
<lib dir="../../../contrib/langid/lib/" regex=".*\.jar" />
<lib dir="../../../dist/" regex="apache-solr-langid-\d.*\.jar" />
<lib dir="../../dist/" regex="apache-solr-velocity-\d.*\.jar" />
<lib dir="../../contrib/velocity/lib" regex=".*\.jar" />
<lib dir="../../../contrib/velocity/lib" regex=".*\.jar" />
<lib dir="../../../dist/" regex="apache-solr-velocity-\d.*\.jar" />
<!-- If a 'dir' option (with or without a regex) is used and nothing
is found that matches, it will be ignored
@ -97,8 +87,8 @@
<lib dir="/total/crap/dir/ignored" />
<!-- an exact 'path' can be used instead of a 'dir' to specify a
specific file. This will cause a serious error to be logged if
it can't be loaded.
specific jar file. This will cause a serious error to be logged
if it can't be loaded.
-->
<!--
<lib path="../a-jar-that-does-not-exist.jar" />
@ -116,31 +106,33 @@
<!-- The DirectoryFactory to use for indexes.
solr.StandardDirectoryFactory, the default, is filesystem
solr.StandardDirectoryFactory is filesystem
based and tries to pick the best implementation for the current
JVM and platform. One can force a particular implementation
via solr.MMapDirectoryFactory, solr.NIOFSDirectoryFactory, or
solr.SimpleFSDirectoryFactory.
JVM and platform. solr.NRTCachingDirectoryFactory, the default,
wraps solr.StandardDirectoryFactory and caches small files in memory
for better NRT performance.
One can force a particular implementation via solr.MMapDirectoryFactory,
solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
solr.RAMDirectoryFactory is memory based, not
persistent, and doesn't work with replication.
-->
<!-- <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/> -->
<!-- for solr.MMapDirectoryFactory see: https://issues.apache.org/jira/browse/SOLR-2247 -->
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.MMapDirectoryFactory}"/>
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Index Config - These settings control low-level behavior of indexing
Most example settings here show the default value, but are commented
out, to more easily see where customizations have been made.
Note: As of Solr 3.6, the <indexDefaults> and <mainIndex> sections
are deprecated and not shown in the example config. They will
still work, but will go away for good in 4.0
Note: This replaces <indexDefaults> and <mainIndex> from older versions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<indexConfig>
<!-- maxFieldLength specifies max number of *tokens* indexed per field. Default: 10000 -->
<!-- <maxFieldLength>10000</maxFieldLength> -->
<!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
LimitTokenCountFilterFactory in your fieldType definition. E.g.
<filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
-->
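maxFieldLength is gone in 4.0; besides the LimitTokenCountFilterFactory shown above for the schema, the same cap can be applied in code that builds Lucene analyzers directly, via the analyzers-common wrapper. A minimal sketch (the 10000 limit mirrors the old default):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.util.Version;

public class LimitedAnalyzerSketch {
    // index at most 10000 tokens per field, like the removed maxFieldLength default
    static Analyzer create() {
        return new LimitTokenCountAnalyzer(new StandardAnalyzer(Version.LUCENE_40), 10000);
    }
}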
<!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
<!-- <writeLockTimeout>1000</writeLockTimeout> -->
@ -308,7 +300,7 @@
<!-- AutoCommit
Perform a <commit/> automatically under certain conditions.
Perform a hard commit automatically under certain conditions.
Instead of enabling autoCommit, consider using "commitWithin"
when adding documents.
@ -317,15 +309,27 @@
maxDocs - Maximum number of documents to add since the last
commit before automatically triggering a new commit.
maxTime - Maximum amount of time that is allowed to pass
maxTime - Maximum amount of time in ms that is allowed to pass
since a document was added before automaticly
triggering a new commit.
openSearcher - if false, the commit causes recent index changes
to be flushed to stable storage, but does not cause a new
searcher to be opened to make those changes visible.
-->
<!--
<autoCommit>
<maxDocs>10000</maxDocs>
<maxTime>1000</maxTime>
<maxTime>15000</maxTime>
<openSearcher>false</openSearcher>
</autoCommit>
<!-- softAutoCommit is like autoCommit except it causes a
'soft' commit which only ensures that changes are visible
but does not ensure that data is synced to disk. This is
faster and more near-realtime friendly than a hard commit.
-->
<!--
<autoSoftCommit>
<maxTime>1000</maxTime>
</autoSoftCommit>
-->
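The example now ships with a hard autoCommit every 15 s (openSearcher=false) plus an optional soft commit. As the comment suggests, client code can instead pass commitWithin when adding documents; a minimal SolrJ sketch (server URL and field values are assumptions):

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.common.SolrInputDocument;

public class CommitWithinSketch {
    public static void main(String[] args) throws Exception {
        SolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "doc-1");
        // ask Solr to make this document durable within 15 seconds, no explicit commit needed
        server.add(doc, 15000);
    }
}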
<!-- Update Related Event Listeners
@ -359,6 +363,15 @@
<arr name="env"> <str>MYVAR=val1</str> </arr>
</listener>
-->
<!-- Enables a transaction log, currently used for real-time get.
"dir" - the target directory for transaction logs, defaults to the
solr data directory. -->
<updateLog>
<str name="dir">${solr.data.dir:}</str>
</updateLog>
</updateHandler>
<!-- IndexReaderFactory
@ -450,19 +463,18 @@
and old cache.
-->
<filterCache class="solr.FastLRUCache"
size="100"
initialSize="10"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- Query Result Cache
Caches results of searches - ordered lists of document ids
(DocList) based on a query, a sort, and the range of documents
requested.
(DocList) based on a query, a sort, and the range of documents requested.
-->
<queryResultCache class="solr.FastLRUCache"
size="100"
initialSize="10"
<queryResultCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- Document Cache
@ -471,9 +483,9 @@
document). Since Lucene internal document ids are transient,
this cache will not be autowarmed.
-->
<documentCache class="solr.FastLRUCache"
size="100"
initialSize="10"
<documentCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>
<!-- Field Value Cache
@ -482,12 +494,12 @@
by document id. The fieldValueCache is created by default
even if not configured here.
-->
<!--
<fieldValueCache class="solr.FastLRUCache"
size="100"
autowarmCount="0"
size="512"
autowarmCount="128"
showItems="32" />
-->
<!-- Custom Cache
@ -541,9 +553,9 @@
An optimization for use with the queryResultCache. When a search
is requested, a superset of the requested number of document ids
are collected. For example, if a search for a particular query
requests matching documents 10 through 19, and queryWindowSize
is 50, then documents 0 through 49 will be collected and cached.
Any further requests in that range can be satisfied via the cache.
requests matching documents 10 through 19, and queryWindowSize is 50,
then documents 0 through 49 will be collected and cached. Any further
requests in that range can be satisfied via the cache.
-->
<queryResultWindowSize>20</queryResultWindowSize>
@ -615,13 +627,21 @@
This section contains instructions for how the SolrDispatchFilter
should behave when processing requests for this SolrCore.
If you wish to regain use of /select?qt=... style request handler
dispatching, then first add handleSelect="true" to
<requestDispatcher>. Then change the name of the request handler
named "/select" to something else without a leading "/", such as
simply "select" and add default="true" to it.
handleSelect is a legacy option that affects the behavior of requests
such as /select?qt=XXX
handleSelect="true" will cause the SolrDispatchFilter to process
the request and dispatch the query to a handler specified by the
"qt" param, assuming "/select" isn't already registered.
handleSelect="false" will cause the SolrDispatchFilter to
ignore "/select" requests, resulting in a 404 unless a handler
is explicitly registered with the name "/select"
handleSelect="true" is not recommended for new users, but is the default
for backwards compatibility
-->
<requestDispatcher>
<requestDispatcher handleSelect="false" >
<!-- Request Parsing
These settings indicate how Solr Requests may be parsed, and
@ -630,7 +650,6 @@
enableRemoteStreaming - enables use of the stream.file
and stream.url parameters for specifying remote streams.
SearchRequestHandler won't fetch it, but some others do.
multipartUploadLimitInKB - specifies the max size of
Multipart File Uploads that Solr will allow in a Request.
@ -703,8 +722,16 @@
http://wiki.apache.org/solr/SolrRequestHandler
Incoming queries will be dispatched to the correct handler
based on the matching request path piece.
Incoming queries will be dispatched to a specific handler by name
based on the path specified in the request.
Legacy behavior: If the request path uses "/select" but no Request
Handler has that name, and if handleSelect="true" has been specified in
the requestDispatcher, then the Request Handler is dispatched based on
the qt parameter. Handlers without a leading '/' are accessed this way
like so: http://host/app/[core/]select?qt=name If no qt is
given, then the requestHandler that declares default="true" will be
used or the one named "standard".
If a Request Handler is declared with startup="lazy", then it will
not be initialized until the first request that uses it.
@ -782,6 +809,29 @@
-->
</requestHandler>
<!-- A request handler that returns indented JSON by default -->
<requestHandler name="/query" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="wt">json</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>
</requestHandler>
<!-- realtime get handler, guaranteed to return the latest stored fields of
any document, without the need to commit or open a new searcher. The
current implementation relies on the updateLog feature being enabled. -->
<requestHandler name="/get" class="solr.RealTimeGetHandler">
<lst name="defaults">
<str name="omitHeader">true</str>
<str name="wt">json</str>
<str name="indent">true</str>
</lst>
</requestHandler>
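The new /get handler together with the updateLog enables real-time get: the latest stored fields of a document are returned even before a commit. A minimal SolrJ sketch (server URL and document id are assumptions; SolrJ routes a qt value with a leading "/" to that handler path):

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ModifiableSolrParams;

public class RealTimeGetSketch {
    public static void main(String[] args) throws Exception {
        SolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        ModifiableSolrParams p = new ModifiableSolrParams();
        p.set("qt", "/get");   // dispatched to the /get request handler
        p.set("id", "doc-1");
        QueryResponse rsp = server.query(p);
        System.out.println(rsp.getResponse().get("doc")); // latest stored fields, committed or not
    }
}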
<!-- A Robust Example
This example SearchHandler declaration shows off usage of the
@ -797,29 +847,35 @@
<!-- VelocityResponseWriter settings -->
<str name="wt">velocity</str>
<str name="v.template">browse</str>
<str name="v.layout">layout</str>
<str name="title">Solritas</str>
<str name="df">text</str>
<!-- Query settings -->
<str name="defType">edismax</str>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
</str>
<str name="df">text</str>
<str name="mm">100%</str>
<str name="q.alt">*:*</str>
<str name="rows">10</str>
<str name="fl">*,score</str>
<str name="mlt.qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
title^10.0 description^5.0 keywords^5.0 author^2.0 resourcename^1.0
</str>
<str name="mlt.fl">text,features,name,sku,id,manu,cat</str>
<str name="mlt.fl">text,features,name,sku,id,manu,cat,title,description,keywords,author,resourcename</str>
<int name="mlt.count">3</int>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
</str>
<!-- Faceting defaults -->
<str name="facet">on</str>
<str name="facet.field">cat</str>
<str name="facet.field">manu_exact</str>
<str name="facet.field">content_type</str>
<str name="facet.field">author_s</str>
<str name="facet.query">ipod</str>
<str name="facet.query">GB</str>
<str name="facet.mincount">1</str>
@ -840,34 +896,59 @@
<str name="f.manufacturedate_dt.facet.range.other">before</str>
<str name="f.manufacturedate_dt.facet.range.other">after</str>
<!-- Highlighting defaults -->
<str name="hl">on</str>
<str name="hl.fl">text features name</str>
<str name="hl.fl">content features title name</str>
<str name="hl.encoder">html</str>
<str name="hl.simple.pre">&lt;b&gt;</str>
<str name="hl.simple.post">&lt;/b&gt;</str>
<str name="f.title.hl.fragsize">0</str>
<str name="f.title.hl.alternateField">title</str>
<str name="f.name.hl.fragsize">0</str>
<str name="f.name.hl.alternateField">name</str>
<str name="f.content.hl.snippets">3</str>
<str name="f.content.hl.fragsize">200</str>
<str name="f.content.hl.alternateField">content</str>
<str name="f.content.hl.maxAlternateFieldLength">750</str>
<!-- Spell checking defaults -->
<str name="spellcheck">on</str>
<str name="spellcheck.extendedResults">false</str>
<str name="spellcheck.count">5</str>
<str name="spellcheck.alternativeTermCount">2</str>
<str name="spellcheck.maxResultsForSuggest">5</str>
<str name="spellcheck.collate">true</str>
<str name="spellcheck.collateExtendedResults">true</str>
<str name="spellcheck.maxCollationTries">5</str>
<str name="spellcheck.maxCollations">3</str>
</lst>
<!-- append spellchecking to our list of components -->
<arr name="last-components">
<str>spellcheck</str>
</arr>
<!--
<str name="url-scheme">httpx</str>
-->
</requestHandler>
<!-- XML Update Request Handler.
<!-- Update Request Handler.
http://wiki.apache.org/solr/UpdateXmlMessages
The canonical Request Handler for Modifying the Index through
commands specified using XML.
commands specified using XML, JSON, CSV, or JAVABIN
Note: Since solr1.1 requestHandlers requires a valid content
type header if posted in the body. For example, curl now
requires: -H 'Content-type:text/xml; charset=utf-8'
To override the request content type and force a specific
Content-type, use the request parameter:
?update.contentType=text/csv
This handler will pick a response format to match the input
if the 'wt' parameter is not explicit
-->
<requestHandler name="/update"
class="solr.XmlUpdateRequestHandler">
<requestHandler name="/update" class="solr.UpdateRequestHandler">
<!-- See below for information on defining
updateRequestProcessorChains that can be used by name
on each Update Request
@ -878,25 +959,18 @@
</lst>
-->
</requestHandler>
<!-- Binary Update Request Handler
http://wiki.apache.org/solr/javabin
-->
<requestHandler name="/update/javabin"
class="solr.BinaryUpdateRequestHandler" />
<!-- CSV Update Request Handler
http://wiki.apache.org/solr/UpdateCSV
-->
<requestHandler name="/update/csv"
class="solr.CSVRequestHandler"
startup="lazy" />
<!-- JSON Update Request Handler
http://wiki.apache.org/solr/UpdateJSON
-->
<requestHandler name="/update/json"
class="solr.JsonUpdateRequestHandler"
startup="lazy" />
<!-- for back compat with clients using /update/json and /update/csv -->
<requestHandler name="/update/json" class="solr.JsonUpdateRequestHandler">
<lst name="defaults">
<str name="stream.contentType">application/json</str>
</lst>
</requestHandler>
<requestHandler name="/update/csv" class="solr.CSVRequestHandler">
<lst name="defaults">
<str name="stream.contentType">application/csv</str>
</lst>
</requestHandler>
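With the single solr.UpdateRequestHandler, the input format is chosen from the request Content-Type (or update.contentType), so JSON, CSV and XML can all be posted to /update. A minimal SolrJ sketch posting a JSON file (server URL and file path are assumptions):

import java.io.File;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;

public class JsonUpdateSketch {
    public static void main(String[] args) throws Exception {
        SolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update");
        up.addFile(new File("docs.json"), "application/json"); // the content type selects the JSON loader
        server.request(up);
        server.commit();
    }
}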
<!-- Solr Cell Update Request Handler
@ -907,9 +981,6 @@
startup="lazy"
class="solr.extraction.ExtractingRequestHandler" >
<lst name="defaults">
<!-- All the main content goes into "text"... if you need to return
the extracted text or do highlighting, use a stored field. -->
<str name="fmap.content">text</str>
<str name="lowernames">true</str>
<str name="uprefix">ignored_</str>
@ -920,12 +991,6 @@
</lst>
</requestHandler>
<!-- XSLT Update Request Handler
Transforms incoming XML with stylesheet identified by tr=
-->
<requestHandler name="/update/xslt"
startup="lazy"
class="solr.XsltUpdateRequestHandler"/>
<!-- Field Analysis Request Handler
@ -955,7 +1020,7 @@
http://wiki.apache.org/solr/AnalysisRequestHandler
An analysis handler that provides a breakdown of the analysis
process of provided docuemnts. This handler expects a (single)
process of provided documents. This handler expects a (single)
content stream with the following format:
<docs>
@ -1021,6 +1086,12 @@
<lst name="defaults">
<str name="echoParams">all</str>
</lst>
<!-- An optional feature of the PingRequestHandler is to configure the
handler with a "healthcheckFile" which can be used to enable/disable
the PingRequestHandler.
relative paths are resolved against the data dir
-->
<!-- <str name="healthcheckFile">server-enabled.txt</str> -->
</requestHandler>
<!-- Echo the request contents back to the client -->
@ -1038,23 +1109,33 @@
http://wiki.apache.org/solr/SolrReplication
In the example below, remove the <lst name="master"> section if
this is just a slave and remove the <lst name="slave"> section
if this is just a master.
It is also necessary for SolrCloud to function (in Cloud mode, the
replication handler is used to bulk transfer segments when nodes
are added or need to recover).
https://wiki.apache.org/solr/SolrCloud/
-->
<!--
<requestHandler name="/replication" class="solr.ReplicationHandler" >
<!--
To enable simple master/slave replication, uncomment one of the
sections below, depending on whether this solr instance should be
the "master" or a "slave". If this instance is a "slave" you will
also need to fill in the masterUrl to point to a real machine.
-->
<!--
<lst name="master">
<str name="replicateAfter">commit</str>
<str name="replicateAfter">startup</str>
<str name="confFiles">schema.xml,stopwords.txt</str>
</lst>
-->
<!--
<lst name="slave">
<str name="masterUrl">http://localhost:8983/solr/replication</str>
<str name="masterUrl">http://your-master-hostname:8983/solr</str>
<str name="pollInterval">00:00:60</str>
</lst>
</requestHandler>
-->
</requestHandler>
<!-- Search Components
@ -1114,30 +1195,49 @@
component
-->
<!-- a spellchecker built from a field of the main index, and
written to disk
-->
<!-- a spellchecker built from a field of the main index -->
<lst name="spellchecker">
<str name="name">default</str>
<str name="field">name</str>
<str name="spellcheckIndexDir">spellchecker</str>
<!-- uncomment this to require terms to occur in 1% of the documents
in order to be included in the dictionary
-->
<!--
<str name="classname">solr.DirectSolrSpellChecker</str>
<!-- the spellcheck distance measure used, the default is the internal levenshtein -->
<str name="distanceMeasure">internal</str>
<!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
<float name="accuracy">0.5</float>
<!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
<int name="maxEdits">2</int>
<!-- the minimum shared prefix when enumerating terms -->
<int name="minPrefix">1</int>
<!-- maximum number of inspections per result. -->
<int name="maxInspections">5</int>
<!-- minimum length of a query term to be considered for correction -->
<int name="minQueryLength">4</int>
<!-- maximum threshold of documents a query term can appear to be considered for correction -->
<float name="maxQueryFrequency">0.01</float>
<!-- uncomment this to require suggestions to occur in 1% of the documents
<float name="thresholdTokenFrequency">.01</float>
-->
</lst>
<!-- a spellchecker that can break or combine words. See "/spell" handler below for usage -->
<lst name="spellchecker">
<str name="name">wordbreak</str>
<str name="classname">solr.WordBreakSolrSpellChecker</str>
<str name="field">name</str>
<str name="combineWords">true</str>
<str name="breakWords">true</str>
<int name="maxChanges">10</int>
</lst>
<!-- a spellchecker that uses a different distance measure -->
<!--
<lst name="spellchecker">
<str name="name">jarowinkler</str>
<str name="field">spell</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="distanceMeasure">
org.apache.lucene.search.spell.JaroWinklerDistance
</str>
<str name="spellcheckIndexDir">spellcheckerJaro</str>
</lst>
-->
@ -1152,9 +1252,8 @@
<lst name="spellchecker">
<str name="name">freq</str>
<str name="field">lowerfilt</str>
<str name="spellcheckIndexDir">spellcheckerFreq</str>
<str name="classname">solr.DirectSolrSpellChecker</str>
<str name="comparatorClass">freq</str>
<str name="buildOnCommit">true</str>
-->
<!-- A spellchecker that reads the list of words from a file -->
@ -1185,9 +1284,21 @@
<requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<str name="df">text</str>
<str name="spellcheck.onlyMorePopular">false</str>
<str name="spellcheck.extendedResults">false</str>
<str name="spellcheck.count">1</str>
<!-- Solr will use suggestions from both the 'default' spellchecker
and from the 'wordbreak' spellchecker and combine them.
collations (re-written queries) can include a combination of
corrections from both spellcheckers -->
<str name="spellcheck.dictionary">default</str>
<str name="spellcheck.dictionary">wordbreak</str>
<str name="spellcheck">on</str>
<str name="spellcheck.extendedResults">true</str>
<str name="spellcheck.count">10</str>
<str name="spellcheck.alternativeTermCount">5</str>
<str name="spellcheck.maxResultsForSuggest">5</str>
<str name="spellcheck.collate">true</str>
<str name="spellcheck.collateExtendedResults">true</str>
<str name="spellcheck.maxCollationTries">10</str>
<str name="spellcheck.maxCollations">5</str>
</lst>
<arr name="last-components">
<str>spellcheck</str>
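The reworked /spell handler combines the index-based default dictionary with the new wordbreak spellchecker and returns collations. A minimal SolrJ sketch of querying it (server URL and the misspelled term are assumptions):

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.SpellCheckResponse;

public class SpellcheckSketch {
    public static void main(String[] args) throws Exception {
        SolrServer server = new HttpSolrServer("http://localhost:8983/solr");
        SolrQuery q = new SolrQuery("powerd");
        q.set("qt", "/spell");          // route the query to the /spell handler
        q.set("spellcheck", "true");
        QueryResponse rsp = server.query(q);
        SpellCheckResponse spell = rsp.getSpellCheckResponse();
        if (spell != null && !spell.isCorrectlySpelled()) {
            System.out.println("collation: " + spell.getCollatedResult());
        }
    }
}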
@ -1221,12 +1332,11 @@
http://wiki.apache.org/solr/ClusteringComponent
This relies on third party jars which are not included in the
release. To use this component (and the "/clustering" handler)
Those jars will need to be downloaded, and you'll need to set
the solr.cluster.enabled system property when running solr...
You'll need to set the solr.cluster.enabled system property
when running solr to run with clustering enabled:
java -Dsolr.clustering.enabled=true -jar start.jar
-->
<searchComponent name="clustering"
enable="${solr.clustering.enabled:false}"
@ -1313,7 +1423,6 @@
<!-- produce sub clusters -->
<bool name="carrot.outputSubClusters">false</bool>
<str name="df">text</str>
<str name="defType">edismax</str>
<str name="qf">
text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
@ -1340,6 +1449,7 @@
<requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
<lst name="defaults">
<bool name="terms">true</bool>
<bool name="distrib">false</bool>
</lst>
<arr name="components">
<str>terms</str>
@ -1419,13 +1529,17 @@
<!-- Configure the standard fragListBuilder -->
<fragListBuilder name="simple"
default="true"
class="solr.highlight.SimpleFragListBuilder"/>
<!-- Configure the single fragListBuilder -->
<fragListBuilder name="single"
class="solr.highlight.SingleFragListBuilder"/>
<!-- Configure the weighted fragListBuilder -->
<fragListBuilder name="weighted"
default="true"
class="solr.highlight.WeightedFragListBuilder"/>
<!-- default tag FragmentsBuilder -->
<fragmentsBuilder name="default"
default="true"
@ -1463,17 +1577,10 @@
<boundaryScanner name="breakIterator"
class="solr.highlight.BreakIteratorBoundaryScanner">
<lst name="defaults">
<!-- type should be one of:
* CHARACTER
* WORD (default)
* LINE
* SENTENCE
-->
<!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
<str name="hl.bs.type">WORD</str>
<!-- language and country are used when constructing Locale
object which will be used when getting instance of
BreakIterator
-->
<!-- language and country are used when constructing Locale object. -->
<!-- And the Locale object will be used when getting instance of BreakIterator -->
<str name="hl.bs.language">en</str>
<str name="hl.bs.country">US</str>
</lst>
@ -1513,7 +1620,8 @@
</updateRequestProcessorChain>
-->
<!--
<!-- Language identification
This example update chain identifies the language of the incoming
documents using the langid contrib. The detected language is
written to field language_s. No field name mapping is done.
@ -1534,6 +1642,24 @@
</updateRequestProcessorChain>
-->
<!-- Script update processor
This example hooks in an update processor implemented using JavaScript.
See more about the script update processor at http://wiki.apache.org/solr/ScriptUpdateProcessor
-->
<!--
<updateRequestProcessorChain name="script">
<processor class="solr.StatelessScriptUpdateProcessorFactory">
<str name="script">update-script.js</str>
<lst name="params">
<str name="config_param">example config parameter</str>
</lst>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
-->
<!-- Response Writers
http://wiki.apache.org/solr/QueryResponseWriter
@ -1608,16 +1734,35 @@
class="com.mycompany.MyValueSourceParser" />
-->
<!-- Legacy config for the admin interface -->
<admin>
<defaultQuery>*:*</defaultQuery>
<!-- configure a healthcheck file for servers behind a
loadbalancer
<!-- Document Transformers
http://wiki.apache.org/solr/DocTransformers
-->
<!--
<healthcheck type="file">server-enabled</healthcheck>
Could be something like:
<transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
<int name="connection">jdbc://....</int>
</transformer>
To add a constant value to all docs, use:
<transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
<int name="value">5</int>
</transformer>
If you want the user to still be able to change it with _value:something_ use this:
<transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
<double name="defaultValue">5</double>
</transformer>
If you are using the QueryElevationComponent, you may wish to mark documents that get boosted. The
EditorialMarkerFactory will do exactly that:
<transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
-->
<!-- Legacy config for the admin interface -->
<admin>
<defaultQuery>*:*</defaultQuery>
</admin>
</config>

@ -43,10 +43,10 @@ import net.yacy.server.serverSwitch;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.util.FastWriter;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocSlice;
import org.apache.solr.util.FastWriter;
// try

@ -47,7 +47,6 @@ import net.yacy.server.serverSwitch;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.FastWriter;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
@ -55,6 +54,7 @@ import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.response.XSLTResponseWriter;
import org.apache.solr.search.DocSlice;
import org.apache.solr.util.FastWriter;
// try
@ -72,12 +72,14 @@ public class select {
try {solrServlet.init(null);} catch (ServletException e) {}
RESPONSE_WRITER.putAll(SolrCore.DEFAULT_RESPONSE_WRITERS);
XSLTResponseWriter xsltWriter = new XSLTResponseWriter();
OpensearchResponseWriter opensearchResponseWriter = new OpensearchResponseWriter();
@SuppressWarnings("rawtypes")
NamedList initArgs = new NamedList();
xsltWriter.init(initArgs);
RESPONSE_WRITER.put("xslt", xsltWriter); // try i.e. http://localhost:8090/solr/select?q=*:*&start=0&rows=10&wt=xslt&tr=json.xsl
RESPONSE_WRITER.put("exml", new EnhancedXMLResponseWriter());
RESPONSE_WRITER.put("rss", new OpensearchResponseWriter()); //try http://localhost:8090/solr/select?wt=rss&q=olympia&hl=true&hl.fl=text_t,h1,h2
RESPONSE_WRITER.put("rss", opensearchResponseWriter); //try http://localhost:8090/solr/select?wt=rss&q=olympia&hl=true&hl.fl=text_t,h1,h2
RESPONSE_WRITER.put("opensearch", opensearchResponseWriter); //try http://localhost:8090/solr/select?wt=rss&q=olympia&hl=true&hl.fl=text_t,h1,h2
RESPONSE_WRITER.put("yjson", new JsonResponseWriter()); //try http://localhost:8090/solr/select?wt=json&q=olympia&hl=true&hl.fl=text_t,h1,h2
}

Binary file not shown

@ -45,7 +45,6 @@ import net.yacy.cora.federate.solr.connector.EmbeddedSolrConnector;
import org.apache.lucene.document.Document;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.FastWriter;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.ServletSolrParams;
import org.apache.solr.request.SolrQueryRequest;
@ -58,6 +57,7 @@ import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.servlet.cache.HttpCacheHeaderUtil;
import org.apache.solr.servlet.cache.Method;
import org.apache.solr.util.FastWriter;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;

@ -174,7 +174,7 @@ public abstract class SolrServerConnector extends AbstractSolrConnector implemen
public void add(final File file, final String solrId) throws IOException {
final ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update/extract");
up.addFile(file);
up.addFile(file, "application/octet-stream");
up.setParam("literal.id", solrId);
up.setParam("uprefix", "attr_");
up.setParam("fmap.content", "attr_content");

@ -29,12 +29,13 @@ import java.util.Set;
import net.yacy.cora.federate.solr.SolrType;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexableField;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.XML;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.DateField;
import org.apache.solr.schema.FieldType;
@ -43,7 +44,6 @@ import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TextField;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocSlice;
import org.apache.solr.search.SolrIndexSearcher;
public class EnhancedXMLResponseWriter implements QueryResponseWriter {
@ -74,9 +74,8 @@ public class EnhancedXMLResponseWriter implements QueryResponseWriter {
assert values.get("responseHeader") != null;
assert values.get("response") != null;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocSlice response = (DocSlice) values.get("response");
DocList response = ((ResultContext) values.get("response")).docs;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) values.get("highlighting");
writeProps(writer, "responseHeader", responseHeader); // this.writeVal("responseHeader", responseHeader);
@ -139,11 +138,11 @@ public class EnhancedXMLResponseWriter implements QueryResponseWriter {
writeTag(writer, "float", "score", Float.toString(score), false);
}
List<Fieldable> fields = doc.getFields();
List<IndexableField> fields = doc.getFields();
int sz = fields.size();
int fidx1 = 0, fidx2 = 0;
while (fidx1 < sz) {
Fieldable value = fields.get(fidx1);
IndexableField value = fields.get(fidx1);
String fieldName = value.name();
fidx2 = fidx1 + 1;
while (fidx2 < sz && fieldName.equals(fields.get(fidx2).name())) {
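The response writers are adapted to the Lucene 4.0 document API: Document.getFields() now returns IndexableField instead of the removed Fieldable, while name() and stringValue() behave as before. A minimal sketch of the new iteration pattern (the helper is illustrative):

import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexableField;

public class FieldDumpSketch {
    static void dump(Document doc) {
        List<IndexableField> fields = doc.getFields(); // Lucene 3.x returned List<Fieldable>
        for (IndexableField field : fields) {
            System.out.println(field.name() + " = " + field.stringValue());
        }
    }
}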

@ -37,15 +37,16 @@ import net.yacy.peers.operation.yacyVersion;
import net.yacy.search.Switchboard;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexableField;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.XML;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSlice;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
/**
@ -139,9 +140,8 @@ public class GSAResponseWriter implements QueryResponseWriter {
long start = System.currentTimeMillis();
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocSlice response = (DocSlice) rsp.getValues().get("response");
DocList response = ((ResultContext) rsp.getValues().get("response")).docs;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) rsp.getValues().get("highlighting");
Map<String, List<String>> snippets = OpensearchResponseWriter.highlighting(highlighting);
@ -214,13 +214,13 @@ public class GSAResponseWriter implements QueryResponseWriter {
for (int i = 0; i < responseCount; i++) {
int id = iterator.nextDoc();
Document doc = searcher.doc(id, SOLR_FIELDS);
List<Fieldable> fields = doc.getFields();
List<IndexableField> fields = doc.getFields();
int fieldc = fields.size();
// pre-scan the fields to get the mime-type
String mime = "";
for (int j = 0; j < fieldc; j++) {
Fieldable value = fields.get(j);
IndexableField value = fields.get(j);
String fieldName = value.name();
if (YaCySchema.content_type.getSolrFieldName().equals(fieldName)) {
mime = value.stringValue();
@ -235,7 +235,7 @@ public class GSAResponseWriter implements QueryResponseWriter {
int size = 0;
boolean title_written = false; // the solr index may contain several; we take only the first which should be the visible tag in <title></title>
for (int j = 0; j < fieldc; j++) {
Fieldable value = fields.get(j);
IndexableField value = fields.get(j);
String fieldName = value.name();
// apply generic matching rule

@ -36,14 +36,15 @@ import net.yacy.cora.protocol.HeaderFramework;
import net.yacy.server.serverObjects;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexableField;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSlice;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
@ -88,9 +89,8 @@ public class JsonResponseWriter implements QueryResponseWriter {
assert values.get("responseHeader") != null;
assert values.get("response") != null;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocSlice response = (DocSlice) values.get("response");
DocList response = ((ResultContext) values.get("response")).docs;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> facetCounts = (SimpleOrderedMap<Object>) values.get("facet_counts");
@SuppressWarnings("unchecked")
@ -124,14 +124,14 @@ public class JsonResponseWriter implements QueryResponseWriter {
writer.write("{\n".toCharArray());
int id = iterator.nextDoc();
Document doc = searcher.doc(id, OpensearchResponseWriter.SOLR_FIELDS);
List<Fieldable> fields = doc.getFields();
List<IndexableField> fields = doc.getFields();
int fieldc = fields.size();
List<String> texts = new ArrayList<String>();
MultiProtocolURI url = null;
String description = "", title = "";
StringBuilder path = new StringBuilder(80);
for (int j = 0; j < fieldc; j++) {
Fieldable value = fields.get(j);
IndexableField value = fields.get(j);
String fieldName = value.name();
// apply generic matching rule

@ -36,15 +36,16 @@ import net.yacy.cora.lod.vocabulary.DublinCore;
import net.yacy.cora.protocol.HeaderFramework;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexableField;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.XML;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.QueryResponseWriter;
import org.apache.solr.response.ResultContext;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSlice;
import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
public class OpensearchResponseWriter implements QueryResponseWriter {
@ -100,9 +101,8 @@ public class OpensearchResponseWriter implements QueryResponseWriter {
assert values.get("responseHeader") != null;
assert values.get("response") != null;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> responseHeader = (SimpleOrderedMap<Object>) rsp.getResponseHeader();
DocSlice response = (DocSlice) values.get("response");
DocList response = ((ResultContext) values.get("response")).docs;
@SuppressWarnings("unchecked")
SimpleOrderedMap<Object> highlighting = (SimpleOrderedMap<Object>) values.get("highlighting");
Map<String, List<String>> snippets = highlighting(highlighting);
@ -151,12 +151,12 @@ public class OpensearchResponseWriter implements QueryResponseWriter {
openTag(writer, "item");
int id = iterator.nextDoc();
Document doc = searcher.doc(id, SOLR_FIELDS);
List<Fieldable> fields = doc.getFields();
List<IndexableField> fields = doc.getFields();
int fieldc = fields.size();
List<String> texts = new ArrayList<String>();
String description = "", title = "";
for (int j = 0; j < fieldc; j++) {
Fieldable value = fields.get(j);
IndexableField value = fields.get(j);
String fieldName = value.name();
// apply generic matching rule

@ -72,6 +72,9 @@ import org.apache.solr.common.SolrInputDocument;
public final class Fulltext implements Iterable<byte[]> {
private static final String SOLR_PATH = "solr_40"; // the number should be identical to the number in the property luceneMatchVersion in solrconfig.xml
private static final String SOLR_OLD_PATH[] = new String[]{"solr_36"};
private static final long forcedCommitTimeout = 3000; // wait this time until a next forced commit is executed
// class objects
@ -116,16 +119,20 @@ public final class Fulltext implements Iterable<byte[]> {
}
public void connectLocalSolr(final int commitWithin) throws IOException {
File solrLocation = this.location;
if (solrLocation.getName().equals("default")) solrLocation = solrLocation.getParentFile();
String solrPath = "solr_36";
solrLocation = new File(solrLocation, solrPath); // the number should be identical to the number in the property luceneMatchVersion in solrconfig.xml
File baseLocation = this.location;
if (baseLocation.getName().equals("default")) baseLocation = baseLocation.getParentFile();
File solrLocation = new File(baseLocation, SOLR_PATH);
// migrate old solr to new
for (String oldVersion: SOLR_OLD_PATH) {
File oldLocation = new File(baseLocation, oldVersion);
if (oldLocation.exists()) oldLocation.renameTo(solrLocation);
}
EmbeddedSolrConnector esc = new EmbeddedSolrConnector(solrLocation, new File(new File(Switchboard.getSwitchboard().appPath, "defaults"), "solr"));
esc.setCommitWithinMs(commitWithin);
Version luceneVersion = esc.getConfig().getLuceneVersion("luceneMatchVersion");
String lvn = luceneVersion.name();
int p = lvn.indexOf('_');
assert solrPath.endsWith(lvn.substring(p)) : "luceneVersion = " + lvn + ", solrPath = " + solrPath + ", p = " + p;
assert SOLR_PATH.endsWith(lvn.substring(p)) : "luceneVersion = " + lvn + ", solrPath = " + SOLR_PATH + ", p = " + p + ", check defaults/solr/solrconfig.xml";
Log.logInfo("MetadataRepository", "connected solr in " + solrLocation.toString() + ", lucene version " + lvn);
this.solr.connect0(esc);
}
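The version-tagged index folder (solr_40) is derived from luceneMatchVersion, which is what the adjusted assert verifies. A minimal sketch of that relationship (folder naming follows the convention above):

import org.apache.lucene.util.Version;

public class SolrPathSketch {
    public static void main(String[] args) {
        String name = Version.LUCENE_40.name();            // "LUCENE_40"
        String suffix = name.substring(name.indexOf('_')); // "_40"
        System.out.println("solr" + suffix);               // "solr_40", the new embedded index folder
    }
}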
