yacy_search_server/defaults/solr/solr.xml

<?xml version="1.0" encoding="UTF-8" ?>
<!--
<solr persistent="false">
  <logging enabled="true">
    <watcher size="100" threshold="INFO" />
  </logging>
  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="${host:}" hostPort="${jetty.port:}">
    <core name="collection1" instanceDir="collection1" />
    <core name="webgraph" instanceDir="webgraph" />
  </cores>
</solr>
-->
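<!-- Note on the commented-out block above: it uses the legacy (pre-Solr 4.4)
     solr.xml style, in which every core is listed explicitly. It documents
     YaCy's former embedded two-core setup: 'collection1' as the default
     document core and 'webgraph' as a second core for the link structure. -->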
<!--
<solr>
  <solrcloud>
    <str name="host">127.0.0.1</str>
    <int name="hostPort">8983</int>
    <str name="hostContext">solr</str>
    <bool name="genericCoreNodeNames">true</bool>
    <int name="zkClientTimeout">30000</int>
    <int name="distribUpdateSoTimeout">600000</int>
    <int name="distribUpdateConnTimeout">60000</int>
  </solrcloud>
  <shardHandlerFactory name="shardHandlerFactory"
                       class="HttpShardHandlerFactory">
    <int name="socketTimeout">600000</int>
    <int name="connTimeout">60000</int>
  </shardHandlerFactory>
</solr>
-->
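<!-- The block above shows the same new-style layout as the active
     configuration further down, but with hard-coded values instead of
     ${property:default} placeholders; it is kept commented for reference. -->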
<!--old
<solr persistent="${solr.xml.persist:false}">
  <cores adminPath="/admin/cores" defaultCoreName="collection1" host="127.0.0.1" hostPort="${hostPort:8983}"
         hostContext="${hostContext:solr}" zkClientTimeout="${solr.zkclienttimeout:30000}" shareSchema="${shareSchema:false}"
         genericCoreNodeNames="${genericCoreNodeNames:true}">
    <core name="core1" instanceDir="core1" shard="${shard:}" collection="${collection:core1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}" coreNodeName="${coreNodeName:}"/>
    <core name="core2" instanceDir="core2" />
    <shardHandlerFactory name="shardHandlerFactory" class="HttpShardHandlerFactory">
      <int name="socketTimeout">${socketTimeout:120000}</int>
      <int name="connTimeout">${connTimeout:15000}</int>
    </shardHandlerFactory>
  </cores>
</solr>
-->
<!--new:-->
<solr>
  <solrcloud>
    <str name="host">127.0.0.1</str>
    <int name="hostPort">${hostPort:8983}</int>
    <str name="hostContext">${hostContext:solr}</str>
    <int name="zkClientTimeout">${solr.zkclienttimeout:30000}</int>
    <!--<str name="shareSchema">${shareSchema:false}</str>-->
    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
  </solrcloud>
  <shardHandlerFactory name="shardHandlerFactory" class="HttpShardHandlerFactory">
    <int name="socketTimeout">${socketTimeout:120000}</int>
    <int name="connTimeout">${connTimeout:15000}</int>
  </shardHandlerFactory>
</solr>
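<!-- Note: values written as ${property:default} are substituted from Java
     system properties when Solr starts; the text after the colon is the
     fallback used if the property is unset. For example (hypothetical
     invocation), launching the JVM with -DhostPort=8090 would override the
     8983 default above. -->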
<!--
In each of the "core1" and "core2" directories there would be a core.properties file that might look like this:
name=core1
shard=${shard:}
collection=${collection:core1}
config=${solrconfig:solrconfig.xml}
schema=${schema:schema.xml}
coreNodeName=${coreNodeName:}
-->
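<!-- A minimal sketch of core discovery (assuming Solr 4.4+ behavior): any
     directory below the solr home that contains a core.properties file is
     loaded as a core. The file may even be empty, in which case the core is
     named after its directory:

       collection1/
         core.properties
         conf/solrconfig.xml
         conf/schema.xml
-->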