diff --git a/htroot/Crawler_p.java b/htroot/Crawler_p.java
index 93d8728ab..55c417784 100644
--- a/htroot/Crawler_p.java
+++ b/htroot/Crawler_p.java
@@ -366,7 +366,7 @@ public class Crawler_p {
boolean obeyHtmlRobotsNofollow = "on".equals(post.get("obeyHtmlRobotsNofollow", "false"));
env.setConfig("obeyHtmlRobotsNofollow", obeyHtmlRobotsNofollow);
- final boolean indexText = "on".equals(post.get("indexText", "false"));
+ final boolean indexText = "on".equals(post.get("indexText", "on"));
env.setConfig("indexText", indexText);
final boolean indexMedia = "on".equals(post.get("indexMedia", "false"));
@@ -536,12 +536,12 @@ public class Crawler_p {
try {
if (newcrawlingdepth > 0) {
if (fullDomain) {
- /* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
+ /* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
* Otherwise we can get them asynchronously later, thus allowing to handle more efficiently large start crawlingFiles */
hyperlinks_from_file = crawlingFileStart(crawlingFile, timezoneOffset, crawlingFileContent);
newcrawlingMustMatch = CrawlProfile.siteFilter(hyperlinks_from_file);
} else if (subPath) {
- /* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
+ /* Crawl is restricted to start domains or sub-paths : we have to get all the start links now.
* Otherwise we can get them asynchronously later, thus allowing to handle more efficiently large start crawlingFiles */
hyperlinks_from_file = crawlingFileStart(crawlingFile, timezoneOffset, crawlingFileContent);
newcrawlingMustMatch = CrawlProfile.subpathFilter(hyperlinks_from_file);
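
Note on the hunk above: the comment's rationale is that a domain- or subpath-restricted crawl can only compute its must-match filter once every start link from the crawling file is known, so the links are loaded eagerly here instead of asynchronously. A conceptual sketch of such a host-restriction filter follows; this is an assumption-laden illustration of the idea, not CrawlProfile.siteFilter's actual implementation, and the class and method names are hypothetical:

    import java.net.URI;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.regex.Pattern;

    public class SiteFilterSketch {
        // Build a must-match regex restricted to the hosts of the given start links.
        static String hostFilter(List<String> startLinks) {
            Set<String> hosts = new LinkedHashSet<>();
            for (String link : startLinks) {
                String host = URI.create(link).getHost();
                if (host != null) hosts.add(Pattern.quote(host));
            }
            return "https?://(" + String.join("|", hosts) + ")(/.*)?";
        }

        public static void main(String[] args) {
            List<String> start = List.of("https://example.org/a", "https://example.net/b");
            // Prints a pattern matching only URLs on example.org and example.net
            System.out.println(hostFilter(start));
        }
    }
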
@@ -770,7 +770,7 @@ public class Crawler_p {
/*
* PPM
LF
- MH
+ MH
(min/max)