author     Jim Ferenczi <jim.ferenczi@elastic.co>  2017-05-19 17:11:23 +0200
committer  GitHub <noreply@github.com>             2017-05-19 17:11:23 +0200
commit     d241c4898e5d5ab87284cc3b351989d26947b552 (patch)
tree       2aefac4d68554a2a4d0de3d26f8e79414858f14f /modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
parent     b18df27d74664ac3254b8395b67d097b487fd7ce (diff)
Removes parent child fielddata specialization (#24737)
This change removes the fielddata specialization needed for the _parent field and replaces it with a simple DocValuesIndexFieldData. The underlying global ordinals are retrieved via a new function called IndexOrdinalsFieldData#getOrdinalMap. The children aggregation is also modified to use a simple WithOrdinals value source rather than the removed WithOrdinals.ParentChild. Relates #20257
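
Note for context: the switch from SortedDocValues to SortedSetDocValues changes how a document's global ordinal is read. The multi-valued API hands out ordinals through an iterator, so a single-valued field is read with one nextOrd() call plus an assertion that the iterator is exhausted. A minimal standalone sketch of that pattern (the class and method names are illustrative, not from the patch):

    import java.io.IOException;

    import org.apache.lucene.index.SortedSetDocValues;

    final class SingleValuedOrdinals {
        // Reads the single global ordinal of docId, or -1 when the
        // document has no value for the join field.
        static long singleOrd(SortedSetDocValues globalOrdinals, int docId) throws IOException {
            if (globalOrdinals.advanceExact(docId) == false) {
                return -1;
            }
            long ord = globalOrdinals.nextOrd();
            // The join field is single-valued per document, so the
            // iterator must yield exactly one ordinal.
            assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
            return ord;
        }
    }

This mirrors the two collect-side changes in the diff below, which replace ordValue() with nextOrd() followed by an exhaustion assert.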
Diffstat (limited to 'modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java')
-rw-r--r--  modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java | 23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
index c1ffb097ab..93ba1b98da 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java
@@ -20,7 +20,7 @@ package org.elasticsearch.join.aggregations;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
@@ -52,10 +52,9 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
static final ParseField TYPE_FIELD = new ParseField("type");
- private final String parentType;
private final Weight childFilter;
private final Weight parentFilter;
- private final ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource;
+ private final ValuesSource.Bytes.WithOrdinals valuesSource;
// Maybe use PagedGrowableWriter? This will be less wasteful than LongArray,
// but then we don't have the reuse feature of BigArrays.
@@ -72,12 +71,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
private boolean multipleBucketsPerParentOrd = false;
public ParentToChildrenAggregator(String name, AggregatorFactories factories,
- SearchContext context, Aggregator parent, String parentType, Query childFilter,
- Query parentFilter, ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource,
+ SearchContext context, Aggregator parent, Query childFilter,
+ Query parentFilter, ValuesSource.Bytes.WithOrdinals valuesSource,
long maxOrd, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
- this.parentType = parentType;
// these two filters are cached in the parser
this.childFilter = context.searcher().createNormalizedWeight(childFilter, false);
this.parentFilter = context.searcher().createNormalizedWeight(parentFilter, false);
@@ -105,9 +103,7 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
-
- final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
- assert globalOrdinals != null;
+ final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx);
Scorer parentScorer = parentFilter.scorer(ctx);
final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer);
return new LeafBucketCollector() {
@@ -115,7 +111,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
@Override
public void collect(int docId, long bucket) throws IOException {
if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) {
- long globalOrdinal = globalOrdinals.ordValue();
+ long globalOrdinal = globalOrdinals.nextOrd();
+ assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
if (globalOrdinal != -1) {
if (parentOrdToBuckets.get(globalOrdinal) == -1) {
parentOrdToBuckets.set(globalOrdinal, bucket);
@@ -147,9 +144,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
DocIdSetIterator childDocsIter = childDocsScorer.iterator();
final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx);
- final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType,
- ctx);
+ final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx);
// Set the scorer, since we now replay only the child docIds
sub.setScorer(new ConstantScoreScorer(null, 1f, childDocsIter));
@@ -161,7 +157,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
continue;
}
if (globalOrdinals.advanceExact(docId)) {
- long globalOrdinal = globalOrdinals.ordValue();
+ long globalOrdinal = globalOrdinals.nextOrd();
+ assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;
long bucketOrd = parentOrdToBuckets.get(globalOrdinal);
if (bucketOrd != -1) {
collectBucket(sub, docId, bucketOrd);
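
Taken together, the two hunks above implement a two-phase join over global ordinals: the first pass maps each parent's global ordinal to its bucket (parentOrdToBuckets), and the second pass replays child documents and routes each one to the bucket recorded for its parent ordinal. A toy standalone sketch of that flow, using plain arrays in place of BigArrays (all names hypothetical):

    final class OrdinalJoinSketch {
        /**
         * parents:   pairs of {globalOrdinal, bucket} seen while collecting parent docs.
         * childOrds: the parent global ordinal carried by each child doc.
         * Returns per-bucket child counts, a stand-in for collectBucket(...) calls.
         */
        static long[] childCounts(int maxOrd, int numBuckets, int[][] parents, int[] childOrds) {
            long[] ordToBucket = new long[maxOrd];
            java.util.Arrays.fill(ordToBucket, -1L);
            for (int[] p : parents) {        // phase 1: remember each parent's bucket
                ordToBucket[p[0]] = p[1];
            }
            long[] counts = new long[numBuckets];
            for (int ord : childOrds) {      // phase 2: replay children
                long bucket = ordToBucket[ord];
                if (bucket != -1) {          // child whose parent landed in a bucket
                    counts[(int) bucket]++;
                }
            }
            return counts;
        }
    }

The real aggregator also handles the case where one parent ordinal maps to several buckets (multipleBucketsPerParentOrd); the sketch keeps only the common single-bucket path.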