summaryrefslogtreecommitdiff
path: root/core/src/test/java/org/elasticsearch/search/aggregations/bucket
diff options
context:
space:
mode:
authorJim Ferenczi <jim.ferenczi@elastic.co>2017-01-19 16:22:48 +0100
committerGitHub <noreply@github.com>2017-01-19 16:22:48 +0100
commitb781a4a176411433d51e16e4bd23992aacad5f9d (patch)
tree6dd7586d06e2a5db9c7560d1372610fe10c395d0 /core/src/test/java/org/elasticsearch/search/aggregations/bucket
parent3d54258de2279e13765805d26b5d183504941dc3 (diff)
Add unit tests for FiltersAggregator (#22678)
Adds unit tests for the `filters` aggregation. This change also adds a helper to search and reduce any aggregator in a unit test. This is done by dividing a single searcher into sub-searchers, one for each segment. Relates #22278
Diffstat (limited to 'core/src/test/java/org/elasticsearch/search/aggregations/bucket')
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersAggregatorTests.java203
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java6
2 files changed, 206 insertions, 3 deletions
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersAggregatorTests.java
new file mode 100644
index 0000000000..da41657275
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersAggregatorTests.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.index.mapper.KeywordFieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregator;
+import org.elasticsearch.search.aggregations.bucket.filters.InternalFilters;
+import org.junit.Before;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Unit tests for the {@code filters} aggregation: an empty index, keyed
+ * filters (including a duplicated key and an "other" bucket), and a
+ * randomized run — each checked both with and without a reduce phase.
+ */
+public class FiltersAggregatorTests extends AggregatorTestCase {
+    // Keyword field named "field", indexed (DOCS) with doc values; shared by all tests.
+    private MappedFieldType fieldType;
+
+    @Before
+    public void setUpTest() throws Exception {
+        super.setUp();
+        fieldType = new KeywordFieldMapper.KeywordFieldType();
+        fieldType.setHasDocValues(true);
+        fieldType.setIndexOptions(IndexOptions.DOCS);
+        fieldType.setName("field");
+    }
+
+    /**
+     * Searching an empty index must still yield one bucket per filter,
+     * each with a doc count of 0.
+     */
+    public void testEmpty() throws Exception {
+        Directory directory = newDirectory();
+        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+        indexWriter.close();
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
+        int numFilters = randomIntBetween(1, 10);
+        QueryBuilder[] filters = new QueryBuilder[numFilters];
+        for (int i = 0; i < filters.length; i++) {
+            // Random terms: nothing in the (empty) index can match them anyway.
+            filters[i] = QueryBuilders.termQuery("field", randomAsciiOfLength(5));
+        }
+        FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", filters);
+        builder.otherBucketKey("other");
+        InternalFilters response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
+        assertEquals(response.getBuckets().size(), numFilters);
+        for (InternalFilters.InternalBucket filter : response.getBuckets()) {
+            assertEquals(filter.getDocCount(), 0);
+        }
+        indexReader.close();
+        directory.close();
+    }
+
+    /**
+     * Keyed filters over a multi-segment index, with overlapping queries, a
+     * duplicated filter key ("same") and an explicit "other" bucket. The
+     * search-only and search-plus-reduce paths must agree on every count.
+     */
+    public void testKeyedFilter() throws Exception {
+        Directory directory = newDirectory();
+        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+        Document document = new Document();
+        document.add(new Field("field", "foo", fieldType));
+        indexWriter.addDocument(document);
+        document.clear();
+        document.add(new Field("field", "else", fieldType));
+        indexWriter.addDocument(document);
+        // make sure we have more than one segment to test the merge
+        indexWriter.commit();
+        // NOTE(review): document is not cleared here, so this doc carries both
+        // "else" and "foo" — the expected counts below account for that; confirm
+        // whether the missing clear() is intentional.
+        document.add(new Field("field", "foo", fieldType));
+        indexWriter.addDocument(document);
+        document.clear();
+        document.add(new Field("field", "bar", fieldType));
+        indexWriter.addDocument(document);
+        document.clear();
+        document.add(new Field("field", "foobar", fieldType));
+        indexWriter.addDocument(document);
+        indexWriter.commit();
+        document.clear();
+        document.add(new Field("field", "something", fieldType));
+        indexWriter.addDocument(document);
+        indexWriter.commit();
+        document.clear();
+        document.add(new Field("field", "foobar", fieldType));
+        indexWriter.addDocument(document);
+        // close() flushes the last (uncommitted) document.
+        indexWriter.close();
+
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
+
+        FiltersAggregator.KeyedFilter[] keys = new FiltersAggregator.KeyedFilter[6];
+        keys[0] = new FiltersAggregator.KeyedFilter("foobar", QueryBuilders.termQuery("field", "foobar"));
+        keys[1] = new FiltersAggregator.KeyedFilter("bar", QueryBuilders.termQuery("field", "bar"));
+        keys[2] = new FiltersAggregator.KeyedFilter("foo", QueryBuilders.termQuery("field", "foo"));
+        // "foo2" deliberately duplicates the "foo" query under a different key.
+        keys[3] = new FiltersAggregator.KeyedFilter("foo2", QueryBuilders.termQuery("field", "foo"));
+        keys[4] = new FiltersAggregator.KeyedFilter("same", QueryBuilders.KeyedFilter == null ? null : QueryBuilders.termQuery("field", "foo"));
+        // Duplicate key "same": the assertion below sees a doc count of 1 (the
+        // "bar" query's count), i.e. only one bucket survives per key —
+        // TODO confirm the intended merge semantics for duplicated keys.
+        keys[5] = new FiltersAggregator.KeyedFilter("same", QueryBuilders.termQuery("field", "bar"));
+        FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", keys);
+        builder.otherBucket(true);
+        builder.otherBucketKey("other");
+        for (boolean doReduce : new boolean[] {true, false}) {
+            final InternalFilters filters;
+            if (doReduce) {
+                filters = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
+            } else {
+                filters = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
+            }
+            assertEquals(filters.getBuckets().size(), 7);
+            assertEquals(filters.getBucketByKey("foobar").getDocCount(), 2);
+            assertEquals(filters.getBucketByKey("foo").getDocCount(), 2);
+            assertEquals(filters.getBucketByKey("foo2").getDocCount(), 2);
+            assertEquals(filters.getBucketByKey("bar").getDocCount(), 1);
+            assertEquals(filters.getBucketByKey("same").getDocCount(), 1);
+            // "else" and "something" docs match none of the filters.
+            assertEquals(filters.getBucketByKey("other").getDocCount(), 2);
+        }
+
+        indexReader.close();
+        directory.close();
+    }
+
+    /**
+     * Randomized test: index 100-200 single-term docs, build random term
+     * filters, and check each bucket's count against the tracked expectation,
+     * including the "other" bucket (docs matched by no filter).
+     */
+    public void testRandom() throws Exception {
+        Directory directory = newDirectory();
+        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+        int numDocs = randomIntBetween(100, 200);
+        int maxTerm = randomIntBetween(10, 50);
+        // expectedBucketCount[t] = number of docs indexed with term t.
+        int[] expectedBucketCount = new int[maxTerm];
+        Document document = new Document();
+        for (int i = 0; i < numDocs; i++) {
+            if (frequently()) {
+                // make sure we have more than one segment to test the merge
+                indexWriter.commit();
+            }
+            int value = randomInt(maxTerm-1);
+            expectedBucketCount[value] += 1;
+            document.add(new Field("field", Integer.toString(value), fieldType));
+            indexWriter.addDocument(document);
+            document.clear();
+        }
+        indexWriter.close();
+
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher indexSearcher = newSearcher(indexReader, true, true);
+        int numFilters = randomIntBetween(1, 10);
+        QueryBuilder[] filters = new QueryBuilder[numFilters];
+        int[] filterTerms = new int[numFilters];
+        int expectedOtherCount = numDocs;
+        Set<Integer> filterSet = new HashSet<>();
+        for (int i = 0; i < filters.length; i++) {
+            int value = randomInt(maxTerm-1);
+            filters[i] = QueryBuilders.termQuery("field", Integer.toString(value));
+            filterTerms[i] = value;
+            // Subtract each filtered term's docs from "other" only once, even
+            // if the same term is drawn for several filters.
+            if (filterSet.contains(value) == false) {
+                expectedOtherCount -= expectedBucketCount[value];
+                filterSet.add(value);
+            }
+        }
+        FiltersAggregationBuilder builder = new FiltersAggregationBuilder("test", filters);
+        builder.otherBucket(true);
+        builder.otherBucketKey("other");
+
+        for (boolean doReduce : new boolean[] {true, false}) {
+            final InternalFilters response;
+            if (doReduce) {
+                response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
+            } else {
+                response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType);
+            }
+            List<InternalFilters.InternalBucket> buckets = response.getBuckets();
+            // One bucket per (anonymous) filter plus the "other" bucket.
+            assertEquals(buckets.size(), filters.length+1);
+
+            for (InternalFilters.InternalBucket bucket : buckets) {
+                if ("other".equals(bucket.getKey())) {
+                    assertEquals(bucket.getDocCount(), expectedOtherCount);
+                } else {
+                    // Anonymous filters are keyed by their index ("0", "1", ...).
+                    int index = Integer.parseInt(bucket.getKey());
+                    assertEquals(bucket.getDocCount(), (long) expectedBucketCount[filterTerms[index]]);
+                }
+            }
+        }
+        indexReader.close();
+        directory.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
index 91e8566e31..cb3165f2be 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
@@ -74,7 +74,7 @@ public class TermsAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
fieldType.setName("string");
fieldType.setHasDocValues(true );
- try (TermsAggregator aggregator = createAggregator(aggregationBuilder, fieldType, indexSearcher)) {
+ try (TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
aggregator.preCollection();
indexSearcher.search(new MatchAllDocsQuery(), aggregator);
aggregator.postCollection();
@@ -94,7 +94,7 @@ public class TermsAggregatorTests extends AggregatorTestCase {
directory.close();
}
- public void testMixLongAndDouble() throws IOException {
+ public void testMixLongAndDouble() throws Exception {
for (TermsAggregatorFactory.ExecutionMode executionMode : TermsAggregatorFactory.ExecutionMode.values()) {
TermsAggregationBuilder aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG)
.executionHint(executionMode.toString())
@@ -190,7 +190,7 @@ public class TermsAggregatorTests extends AggregatorTestCase {
private InternalAggregation buildInternalAggregation(TermsAggregationBuilder builder, MappedFieldType fieldType,
IndexSearcher searcher) throws IOException {
- try (TermsAggregator aggregator = createAggregator(builder, fieldType, searcher)) {
+ try (TermsAggregator aggregator = createAggregator(builder, searcher, fieldType)) {
aggregator.preCollection();
searcher.search(new MatchAllDocsQuery(), aggregator);
aggregator.postCollection();