author    Martijn van Groningen <martijn.v.groningen@gmail.com>    2017-06-14 01:26:36 +0200
committer Martijn van Groningen <martijn.v.groningen@gmail.com>    2017-06-15 18:28:31 +0200
commit    428e70758ac6895ac995f4315412f4d3729aea9b (patch)
tree      bb6404aac053c5ece590214a33e02304c2bab694 /core/src/test/java/org/elasticsearch
parent    2a78b0a19fb6584944d92ad34a91f2814b3dcbe4 (diff)
Moved more token filters to the analysis-common module.
The following token filters were moved: `edge_ngram`, `ngram`, `uppercase`, `lowercase`, `length`, `flatten_graph` and `unique`. Relates to #23658
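
Editor's note: such a move works by re-registering each filter's factory through the AnalysisPlugin extension point instead of core's built-in analysis registration. A minimal sketch of what that wiring can look like, assuming the registration style of the analysis-common plugin (the class name CommonAnalysisPlugin and the two factory classes shown are illustrative stand-ins; the exact entries in this commit may differ):

    package org.elasticsearch.analysis.common;

    import java.util.Map;
    import java.util.TreeMap;

    import org.elasticsearch.index.analysis.TokenFilterFactory;
    import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
    import org.elasticsearch.plugins.AnalysisPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
        @Override
        public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
            Map<String, AnalysisProvider<TokenFilterFactory>> filters = new TreeMap<>();
            // Map the filter name used in index settings to a factory whose
            // constructor matches AnalysisProvider's (indexSettings, env, name, settings).
            filters.put("lowercase", LowerCaseTokenFilterFactory::new);
            filters.put("uppercase", UpperCaseTokenFilterFactory::new);
            return filters;
        }
    }

Once a filter is registered only here, it resolves solely when the analysis-common module is installed, which is why the tests below leave core.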
Diffstat (limited to 'core/src/test/java/org/elasticsearch')
-rw-r--r--  core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java  |  73
-rw-r--r--  core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java           | 152
-rw-r--r--  core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java |  49
-rw-r--r--  core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java                          |  30
-rw-r--r--  core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java                    |  10
5 files changed, 14 insertions, 300 deletions
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java
deleted file mode 100644
index 259da010da..0000000000
--- a/core/src/test/java/org/elasticsearch/index/analysis/FlattenGraphTokenFilterFactoryTests.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.analysis;
-
-import java.io.IOException;
-
-import org.apache.lucene.analysis.CannedTokenStream;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.test.ESTokenStreamTestCase;
-import org.elasticsearch.test.IndexSettingsModule;
-
-public class FlattenGraphTokenFilterFactoryTests extends ESTokenStreamTestCase {
-
- public void testBasic() throws IOException {
-
- Index index = new Index("test", "_na_");
- String name = "ngr";
- Settings indexSettings = newAnalysisSettingsBuilder().build();
- IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
- Settings settings = newAnalysisSettingsBuilder().build();
-
- // "wow that's funny" and "what the fudge" are separate side paths, in parallel with "wtf", on input:
- TokenStream in = new CannedTokenStream(0, 12, new Token[] {
- token("wtf", 1, 5, 0, 3),
- token("what", 0, 1, 0, 3),
- token("wow", 0, 3, 0, 3),
- token("the", 1, 1, 0, 3),
- token("fudge", 1, 3, 0, 3),
- token("that's", 1, 1, 0, 3),
- token("funny", 1, 1, 0, 3),
- token("happened", 1, 1, 4, 12)
- });
-
- TokenStream tokens = new FlattenGraphTokenFilterFactory(indexProperties, null, name, settings).create(in);
-
- // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
- assertTokenStreamContents(tokens,
- new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
- new int[] {0, 0, 0, 0, 0, 0, 0, 4},
- new int[] {3, 3, 3, 3, 3, 3, 3, 12},
- new int[] {1, 0, 0, 1, 0, 1, 0, 1},
- new int[] {3, 1, 1, 1, 1, 1, 1, 1},
- 12);
- }
-
- private static Token token(String term, int posInc, int posLength, int startOffset, int endOffset) {
- final Token t = new Token(term, startOffset, endOffset);
- t.setPositionIncrement(posInc);
- t.setPositionLength(posLength);
- return t;
- }
-}
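
Editor's note: the deleted test above exercises graph flattening, where a token such as "wtf" spans several positions in parallel with a side path like "what the fudge", and the filter rewrites position increments/lengths into a stream a Lucene index can store. A small hedged sketch of the same behavior at the Lucene level, assuming org.apache.lucene.analysis.core.FlattenGraphFilter (which this factory wrapped) and the test-framework classes the test itself imported:

    import org.apache.lucene.analysis.CannedTokenStream;
    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.FlattenGraphFilter;

    public class FlattenGraphSketch {
        public static TokenStream flattened() {
            // "wtf" occupies three positions, in parallel with "what the fudge".
            Token wtf = new Token("wtf", 0, 3);
            wtf.setPositionLength(3);
            Token what = new Token("what", 0, 3);
            what.setPositionIncrement(0);        // starts at the same position as "wtf"
            Token the = new Token("the", 0, 3);  // advances one position along the side path
            Token fudge = new Token("fudge", 0, 3);
            TokenStream graph = new CannedTokenStream(0, 3, wtf, what, the, fudge);
            // The filter linearizes the graph; an inverted index cannot
            // represent token graphs directly.
            return new FlattenGraphFilter(graph);
        }
    }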
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
deleted file mode 100644
index 5e1cf2e817..0000000000
--- a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.analysis;
-
-import org.apache.lucene.analysis.MockTokenizer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.reverse.ReverseStringFilter;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.Settings.Builder;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.test.ESTokenStreamTestCase;
-import org.elasticsearch.test.IndexSettingsModule;
-
-import java.io.IOException;
-import java.io.StringReader;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Random;
-
-import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween;
-import static org.hamcrest.Matchers.instanceOf;
-
-public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase {
- public void testParseTokenChars() {
- final Index index = new Index("test", "_na_");
- final String name = "ngr";
- final Settings indexSettings = newAnalysisSettingsBuilder().build();
- IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
- for (String tokenChars : Arrays.asList("letters", "number", "DIRECTIONALITY_UNDEFINED")) {
- final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
- try {
- new NGramTokenizerFactory(indexProperties, null, name, settings).create();
- fail();
- } catch (IllegalArgumentException expected) {
- // OK
- }
- }
- for (String tokenChars : Arrays.asList("letter", " digit ", "punctuation", "DIGIT", "CoNtRoL", "dash_punctuation")) {
- final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
- indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
-
- new NGramTokenizerFactory(indexProperties, null, name, settings).create();
- // no exception
- }
- }
-
- public void testNoTokenChars() throws IOException {
- final Index index = new Index("test", "_na_");
- final String name = "ngr";
- final Settings indexSettings = newAnalysisSettingsBuilder().build();
- final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 4).putArray("token_chars", new String[0]).build();
- Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
- tokenizer.setReader(new StringReader("1.34"));
- assertTokenStreamContents(tokenizer, new String[] {"1.", "1.3", "1.34", ".3", ".34", "34"});
- }
-
- public void testPreTokenization() throws IOException {
- // Make sure that pretokenization works well and that it can be used even with token chars which are supplementary characters
- final Index index = new Index("test", "_na_");
- final String name = "ngr";
- final Settings indexSettings = newAnalysisSettingsBuilder().build();
- Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
- Tokenizer tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
- tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f "));
- assertTokenStreamContents(tokenizer,
- new String[] {"Åb", "Åbc", "bc", "dé", "déf", "éf", "g\uD801\uDC00", "g\uD801\uDC00f", "\uD801\uDC00f"});
- settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
- tokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
- tokenizer.setReader(new StringReader(" a!$ 9"));
- assertTokenStreamContents(tokenizer,
- new String[] {" a", " a!", "a!", "a!$", "!$", "!$ ", "$ ", "$ 9", " 9"});
- }
-
- public void testPreTokenizationEdge() throws IOException {
- // Make sure that pretokenization works well and that it can be used even with token chars which are supplementary characters
- final Index index = new Index("test", "_na_");
- final String name = "ngr";
- final Settings indexSettings = newAnalysisSettingsBuilder().build();
- Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
- Tokenizer tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
- tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f "));
- assertTokenStreamContents(tokenizer,
- new String[] {"Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f"});
- settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
- tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
- tokenizer.setReader(new StringReader(" a!$ 9"));
- assertTokenStreamContents(tokenizer,
- new String[] {" a", " a!"});
- }
-
- public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception {
- int iters = scaledRandomIntBetween(20, 100);
- for (int i = 0; i < iters; i++) {
- final Index index = new Index("test", "_na_");
- final String name = "ngr";
- Version v = randomVersion(random());
- Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3);
- boolean reverse = random().nextBoolean();
- if (reverse) {
- builder.put("side", "back");
- }
- Settings settings = builder.build();
- Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
- Tokenizer tokenizer = new MockTokenizer();
- tokenizer.setReader(new StringReader("foo bar"));
- TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create(tokenizer);
- if (reverse) {
- assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class));
- } else {
- assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
- }
- }
- }
-
-
- private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException {
- Field[] declaredFields = Version.class.getFields();
- List<Field> versionFields = new ArrayList<>();
- for (Field field : declaredFields) {
- if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") && field.getType() == Version.class) {
- versionFields.add(field);
- }
- }
- return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class);
- }
-
-}
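
Editor's note: the ngram tests above drive the tokenizer entirely through Settings keys (min_gram, max_gram, token_chars). A minimal sketch of the equivalent index configuration via the Settings builder, reusing the illustrative names my_ngramt and name_index_analyzer and the values from the deleted tests:

    import org.elasticsearch.common.settings.Settings;

    public class NGramSettingsSketch {
        public static Settings ngramAnalyzer() {
            // Emit 2- and 3-grams over letters and digits, as in testPreTokenization.
            return Settings.builder()
                    .put("analysis.tokenizer.my_ngramt.type", "ngram")
                    .put("analysis.tokenizer.my_ngramt.min_gram", 2)
                    .put("analysis.tokenizer.my_ngramt.max_gram", 3)
                    .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
                    .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
                    .build();
        }
    }

After this commit, creating an index with such settings requires the analysis-common module to be present, which is why the integration tests below drop their ngram-dependent assertions.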
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 9cbd9fc5d7..2bc98b39dc 100644
--- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.fetch.subphase.highlight;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
@@ -214,54 +213,6 @@ public class HighlighterSearchIT extends ESIntegTestCase {
assertHighlight(search, 0, "name", 0, startsWith("<em>abc</em> <em>abc</em> <em>abc</em> <em>abc</em>"));
}
- public void testNgramHighlighting() throws IOException {
- assertAcked(prepareCreate("test")
- .addMapping("test",
- "name", "type=text,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,"
- + "term_vector=with_positions_offsets",
- "name2", "type=text,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,"
- + "term_vector=with_positions_offsets")
- .setSettings(Settings.builder()
- .put(indexSettings())
- .put("analysis.filter.my_ngram.max_gram", 20)
- .put("analysis.filter.my_ngram.min_gram", 1)
- .put("analysis.filter.my_ngram.type", "ngram")
- .put("analysis.tokenizer.my_ngramt.max_gram", 20)
- .put("analysis.tokenizer.my_ngramt.min_gram", 1)
- .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
- .put("analysis.tokenizer.my_ngramt.type", "ngram")
- .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
- .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
- .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram")
- .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")));
- client().prepareIndex("test", "test", "1")
- .setSource("name", "logicacmg ehemals avinci - the know how company",
- "name2", "logicacmg ehemals avinci - the know how company").get();
- refresh();
- ensureGreen();
- SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m"))
- .highlighter(new HighlightBuilder().field("name")).get();
- assertHighlight(search, 0, "name", 0,
- equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
-
- search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).highlighter(new HighlightBuilder().field("name")).get();
- assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
-
- search = client().prepareSearch().setQuery(matchQuery("name", "logica")).highlighter(new HighlightBuilder().field("name")).get();
- assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
-
- search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).highlighter(new HighlightBuilder().field("name2"))
- .get();
- assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"));
-
- search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).highlighter(new HighlightBuilder().field("name2"))
- .get();
- assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"));
-
- search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).highlighter(new HighlightBuilder().field("name2")).get();
- assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> ehemals avinci - the know how company"));
- }
-
public void testEnsureNoNegativeOffsets() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1",
diff --git a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
index 05a7227636..bd8cfbcaa5 100644
--- a/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/QueryStringIT.java
@@ -19,16 +19,6 @@
package org.elasticsearch.search.query;
-import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
-import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
@@ -56,6 +46,16 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
public class QueryStringIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -91,10 +91,6 @@ public class QueryStringIT extends ESIntegTestCase {
resp = client().prepareSearch("test").setQuery(queryStringQuery("Bar")).get();
assertHitCount(resp, 3L);
assertHits(resp.getHits(), "1", "2", "3");
-
- resp = client().prepareSearch("test").setQuery(queryStringQuery("foa")).get();
- assertHitCount(resp, 1L);
- assertHits(resp.getHits(), "3");
}
public void testWithDate() throws Exception {
@@ -161,8 +157,6 @@ public class QueryStringIT extends ESIntegTestCase {
assertHits(resp.getHits(), "1");
resp = client().prepareSearch("test").setQuery(queryStringQuery("Baz")).get();
assertHits(resp.getHits(), "1");
- resp = client().prepareSearch("test").setQuery(queryStringQuery("sbaz")).get();
- assertHits(resp.getHits(), "1");
resp = client().prepareSearch("test").setQuery(queryStringQuery("19")).get();
assertHits(resp.getHits(), "1");
// nested doesn't match because it's hidden
@@ -223,11 +217,11 @@ public class QueryStringIT extends ESIntegTestCase {
indexRandom(true, false, reqs);
SearchResponse resp = client().prepareSearch("test2").setQuery(
- queryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get();
+ queryStringQuery("foo eggplant").defaultOperator(Operator.AND)).get();
assertHitCount(resp, 0L);
resp = client().prepareSearch("test2").setQuery(
- queryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get();
+ queryStringQuery("foo eggplant").defaultOperator(Operator.AND).useAllFields(true)).get();
assertHits(resp.getHits(), "1");
assertHitCount(resp, 1L);
diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
index f22ec392b9..a32a806037 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
@@ -398,10 +398,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Bar")).get();
assertHitCount(resp, 3L);
assertHits(resp.getHits(), "1", "2", "3");
-
- resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foa")).get();
- assertHitCount(resp, 1L);
- assertHits(resp.getHits(), "3");
}
public void testWithDate() throws Exception {
@@ -480,8 +476,6 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
assertHits(resp.getHits(), "1");
resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("Baz")).get();
assertHits(resp.getHits(), "1");
- resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("sbaz")).get();
- assertHits(resp.getHits(), "1");
resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("19")).get();
assertHits(resp.getHits(), "1");
// nested doesn't match because it's hidden
@@ -547,11 +541,11 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
indexRandom(true, false, reqs);
SearchResponse resp = client().prepareSearch("test").setQuery(
- simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND)).get();
+ simpleQueryStringQuery("foo eggplant").defaultOperator(Operator.AND)).get();
assertHitCount(resp, 0L);
resp = client().prepareSearch("test").setQuery(
- simpleQueryStringQuery("foo eggplent").defaultOperator(Operator.AND).useAllFields(true)).get();
+ simpleQueryStringQuery("foo eggplant").defaultOperator(Operator.AND).useAllFields(true)).get();
assertHits(resp.getHits(), "1");
assertHitCount(resp, 1L);