diff options
Diffstat (limited to 'test/framework/src/main/resources')
-rw-r--r-- | test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json | 54 | ||||
-rw-r--r-- | test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml | 39 |
2 files changed, 93 insertions, 0 deletions
diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json new file mode 100644 index 0000000000..38937a9b5a --- /dev/null +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json @@ -0,0 +1,54 @@ +{ + "index":{ + "analysis":{ + "tokenizer":{ + "standard":{ + "type":"standard" + } + }, + "filter":{ + "stop":{ + "type":"stop", + "stopwords":["test-stop"] + }, + "stop2":{ + "type":"stop", + "stopwords":["stop2-1", "stop2-2"] + }, + "my":{ + "type":"myfilter" + }, + "dict_dec":{ + "type":"dictionary_decompounder", + "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] + } + }, + "analyzer":{ + "standard":{ + "type":"standard", + "stopwords":["test1", "test2", "test3"] + }, + "custom1":{ + "tokenizer":"standard", + "filter":["stop", "stop2"] + }, + "custom4":{ + "tokenizer":"standard", + "filter":["my"] + }, + "custom6":{ + "tokenizer":"standard", + "position_increment_gap": 256 + }, + "czechAnalyzerWithStemmer":{ + "tokenizer":"standard", + "filter":["standard", "lowercase", "stop", "czech_stem"] + }, + "decompoundingAnalyzer":{ + "tokenizer":"standard", + "filter":["dict_dec"] + } + } + } + } +} diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml new file mode 100644 index 0000000000..f7a57d14db --- /dev/null +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml @@ -0,0 +1,39 @@ +index : + analysis : + tokenizer : + standard : + type : standard + filter : + stop : + type : stop + stopwords : [test-stop] + stop2 : + type : stop + stopwords : [stop2-1, stop2-2] + my : + type : myfilter + dict_dec : + type : dictionary_decompounder + word_list : [donau, dampf, schiff, spargel, creme, suppe] + analyzer : + standard : + type : standard + stopwords : [test1, test2, test3] + custom1 : + tokenizer : standard + filter : [stop, stop2] + custom4 : + tokenizer : standard + filter : [my] + custom6 : + tokenizer : standard + position_increment_gap: 256 + custom7 : + type : standard + version: 3.6 + czechAnalyzerWithStemmer : + tokenizer : standard + filter : [standard, lowercase, stop, czech_stem] + decompoundingAnalyzer : + tokenizer : standard + filter : [dict_dec]