+ lng = TranslatorXliff.loadTranslationsListsFromXliff(langFile);
+ langFile = new File(langPath, Files.getNameWithoutExtension(langFile.getName())+".lng");
+ tx.saveAsLngFile(null, langFile, lng);
+ }
+
+ if (post.containsKey("use_lang") && "on".equals(post.get("use_lang"))) {
+ Translator.changeLang(env, langPath, langFile.getName());
+ }
+ } catch (final IOException e) {
+ prop.put("status", "2");//error saving the language file
+ }
} catch(final IOException e) {
prop.put("status", "1");//unable to get url
prop.put("status_url", url);
- return prop;
- }
- try {
- final File langFile = new File(langPath, url.substring(url.lastIndexOf('/'), url.length()));
- final BufferedWriter bw = new BufferedWriter(new PrintWriter(new FileWriter(langFile)));
-
- while (it.hasNext()) {
- bw.write(it.next() + "\n");
- }
- bw.close();
- } catch(final IOException e) {
- prop.put("status", "2");//error saving the language file
- return prop;
- }
- if (post.containsKey("use_lang") && "on".equals(post.get("use_lang"))) {
- Translator.changeLang(env, langPath, url.substring(url.lastIndexOf('/'), url.length()));
}
}
}
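
For readers skimming the hunk above: the replaced save logic no longer streams the downloaded lines straight to disk, but parses the XLIFF file into translation lists, writes them out as a .lng file with the same base name, and only then optionally activates the language. Below is a minimal sketch of that flow, not part of the patch; the package locations, the parameter types, and the generic type of the list structure are assumptions, while the three translation calls mirror the ones visible in the diff.

// Sketch only -- imports and parameter types are assumptions, the
// TranslatorXliff/Translator calls mirror the ones added in the patch.
import java.io.File;
import java.io.IOException;
import java.util.Map;

import com.google.common.io.Files;                  // Guava, already used in the hunk

import net.yacy.data.Translator;                    // assumed package
import net.yacy.server.serverSwitch;                // assumed package
import net.yacy.utils.translation.TranslatorXliff;  // assumed package

final class XliffLanguageInstaller {

    /** Convert a downloaded XLIFF file to a .lng file and optionally activate it. */
    static void install(final File xliffFile, final File langPath,
                        final boolean useLang, final serverSwitch env) throws IOException {
        final TranslatorXliff tx = new TranslatorXliff();

        // parse the XLIFF translation units into the in-memory list structure
        // (generic type assumed; the declaration of lng sits outside the shown hunk)
        final Map<String, Map<String, String>> lng =
                TranslatorXliff.loadTranslationsListsFromXliff(xliffFile);

        // write the lists back as a classic .lng file with the same base name
        final File lngFile = new File(langPath,
                Files.getNameWithoutExtension(xliffFile.getName()) + ".lng");
        tx.saveAsLngFile(null, lngFile, lng);

        // optionally switch the web interface to the freshly installed language
        if (useLang) {
            Translator.changeLang(env, langPath, lngFile.getName());
        }
    }
}
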
diff --git a/htroot/ConfigRobotsTxt_p.html b/htroot/ConfigRobotsTxt_p.html
index 2b59f2e14..74b5e7775 100644
--- a/htroot/ConfigRobotsTxt_p.html
+++ b/htroot/ConfigRobotsTxt_p.html
@@ -10,7 +10,7 @@
Exclude Web-Spiders
Here you can set up a robots.txt for all webcrawlers that try to access the webinterface of your peer.
- robots.txt is a volunteer agreement most search-engines (including YaCy) follow.
+ robots.txt is a voluntary agreement most search-engines (including YaCy) follow.
It disallows crawlers to access webpages or even entire domains.
#(error)#
@@ -50,4 +50,4 @@