author    Ryan Ernst <ryan@iernst.net>  2015-12-18 12:24:30 -0800
committer Ryan Ernst <ryan@iernst.net>  2015-12-18 12:24:30 -0800
commit    853e9c0fd10af51e66f5b9d63c5e6b248968c15e (patch)
tree      b39f0a1723d582e857181d0cf69c3be23411e544 /core/src/main/java/org/elasticsearch/search
parent    0f518e1b07059293b7ab94c5494ff86f531053a4 (diff)
parent    10dfa32f9ddc2216be2aabb88462a32ce5900333 (diff)
Merge branch 'master' into wildcard_imports
Diffstat (limited to 'core/src/main/java/org/elasticsearch/search')
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchService.java | 55
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchShardTarget.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java | 28
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java | 16
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java | 25
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java | 7
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java | 70
-rw-r--r--  core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java | 16
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java | 132
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java | 294
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java | 57
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java | 14
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java | 10
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java | 44
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/SearchContext.java | 8
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java | 12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java | 11
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java | 10
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java | 156
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java | 135
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java | 108
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java | 235
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java | 113
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java | 94
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java | 165
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java | 158
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java | 103
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java | 97
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/Profiler.java | 130
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/Profilers.java | 59
-rw-r--r--  core/src/main/java/org/elasticsearch/search/query/QueryPhase.java | 68
-rw-r--r--  core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java | 41
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java | 7
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java | 6
54 files changed, 2235 insertions, 336 deletions
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 9501099997..99f9b0ea0d 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
@@ -38,6 +39,8 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
@@ -70,7 +73,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
@@ -83,12 +85,16 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
+import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.*;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.*;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
+import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -109,9 +115,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
public static final String NORMS_LOADING_KEY = "index.norms.loading";
public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";
- public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout";
public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
+ public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
+
private final ThreadPool threadPool;
@@ -150,7 +157,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
private final ParseFieldMatcher parseFieldMatcher;
@Inject
- public SearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool,
+ public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool,
ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase,
IndicesRequestCache indicesQueryCache) {
super(settings);
@@ -184,19 +191,12 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer));
this.indicesWarmer.addListener(new SearchWarmer());
- defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT);
- nodeSettingsService.addListener(new SearchSettingsListener());
+ defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
}
- class SearchSettingsListener implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout);
- if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) {
- logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout);
- SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout;
- }
- }
+ private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) {
+ this.defaultSearchTimeout = defaultSearchTimeout;
}
@Override
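
The two hunks above are the heart of the settings migration: the ad-hoc NodeSettingsService.Listener is replaced by a typed Setting that carries its own key, default, dynamic-updatability, and scope, plus a consumer registered on ClusterSettings. A minimal sketch of that pattern, using a hypothetical service and setting key (the Setting/ClusterSettings calls are the ones shown in the diff):

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class MyTimeoutService {
    // declared once: key, default, dynamically updatable, cluster scope (names are hypothetical)
    public static final Setting<TimeValue> MY_TIMEOUT_SETTING =
            Setting.timeSetting("my.timeout", TimeValue.timeValueMillis(-1), true, Setting.Scope.CLUSTER);

    private volatile TimeValue timeout;

    public MyTimeoutService(Settings settings, ClusterSettings clusterSettings) {
        this.timeout = MY_TIMEOUT_SETTING.get(settings); // initial value from node settings
        // the callback runs on every dynamic cluster-settings update; change detection
        // is handled by the settings infrastructure, so the old hand-rolled listener
        // with its equals() check is no longer needed
        clusterSettings.addSettingsUpdateConsumer(MY_TIMEOUT_SETTING, t -> this.timeout = t);
    }
}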
@@ -549,7 +549,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
- SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
+ DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
SearchContext.setCurrent(context);
try {
@@ -558,7 +558,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
context.scrollContext().scroll = request.scroll();
}
if (request.template() != null) {
- ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context);
+ ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap());
BytesReference run = (BytesReference) executable.run();
try (XContentParser parser = XContentFactory.xContent(run).createParser(run)) {
QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry());
@@ -656,7 +656,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
}
- private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException {
+ private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException {
// nothing to parse...
if (source == null) {
return;
@@ -712,6 +712,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
if (source.minScore() != null) {
context.minimumScore(source.minScore());
}
+ if (source.profile()) {
+ context.setProfilers(new Profilers(context.searcher()));
+ }
context.timeoutInMillis(source.timeoutInMillis());
context.terminateAfter(source.terminateAfter());
if (source.aggregations() != null) {
@@ -807,19 +810,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
fieldDataFieldsContext.setHitExecutionNeeded(true);
}
if (source.highlighter() != null) {
- XContentParser highlighterParser = null;
+ HighlightBuilder highlightBuilder = source.highlighter();
try {
- highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter());
- this.elementParsers.get("highlight").parse(highlighterParser, context);
- } catch (Exception e) {
- String sSource = "_na_";
- try {
- sSource = source.toString();
- } catch (Throwable e1) {
- // ignore
- }
- XContentLocation location = highlighterParser != null ? highlighterParser.getTokenLocation() : null;
- throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e);
+ context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext()));
+ } catch (IOException e) {
+ throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
}
}
if (source.innerHits() != null) {
@@ -841,7 +836,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
if (source.scriptFields() != null) {
for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) {
- SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH);
+ SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap());
context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure()));
}
}
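
One pattern recurs in this file and in the aggregation changes below: every ScriptService call (compile, executable, search) gains a trailing Map<String, Object> of compile-time parameters, and call sites with nothing to pass supply an immutable empty map. A hedged fragment of the new call shape, assuming scriptService, request, context, and field are in scope as in the hunks above:

import java.util.Collections;

// compile-time params are now an explicit argument; Collections.emptyMap() means "none"
ExecutableScript executable = scriptService.executable(
        request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap());
SearchScript searchScript = context.scriptService().search(
        context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap());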
diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
index 1a12751d39..c648436c3a 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
@@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.text.Text;
import java.io.IOException;
@@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
}
public SearchShardTarget(String nodeId, String index, int shardId) {
- this.nodeId = nodeId == null ? null : new StringAndBytesText(nodeId);
- this.index = new StringAndBytesText(index);
+ this.nodeId = nodeId == null ? null : new Text(nodeId);
+ this.index = new Text(index);
this.shardId = shardId;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
index 742f678f6f..0681996e3e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;
@@ -30,10 +31,13 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.CollectorResult;
+import org.elasticsearch.search.profile.InternalProfileCollector;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -81,8 +85,13 @@ public class AggregationPhase implements SearchPhase {
}
context.aggregations().aggregators(aggregators);
if (!collectors.isEmpty()) {
- final BucketCollector collector = BucketCollector.wrap(collectors);
- collector.preCollection();
+ Collector collector = BucketCollector.wrap(collectors);
+ ((BucketCollector)collector).preCollection();
+ if (context.getProfilers() != null) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION,
+ // TODO: report on child aggs as well
+ Collections.emptyList());
+ }
context.queryCollectors().put(AggregationPhase.class, collector);
}
} catch (IOException e) {
@@ -116,6 +125,7 @@ public class AggregationPhase implements SearchPhase {
BucketCollector globalsCollector = BucketCollector.wrap(globals);
Query query = Queries.newMatchAllQuery();
Query searchFilter = context.searchFilter(context.types());
+
if (searchFilter != null) {
BooleanQuery filtered = new BooleanQuery.Builder()
.add(query, Occur.MUST)
@@ -124,8 +134,20 @@ public class AggregationPhase implements SearchPhase {
query = filtered;
}
try {
+ final Collector collector;
+ if (context.getProfilers() == null) {
+ collector = globalsCollector;
+ } else {
+ InternalProfileCollector profileCollector = new InternalProfileCollector(
+ globalsCollector, CollectorResult.REASON_AGGREGATION_GLOBAL,
+ // TODO: report on sub collectors
+ Collections.emptyList());
+ collector = profileCollector;
+ // start a new profile with this collector
+ context.getProfilers().addProfiler().setCollector(profileCollector);
+ }
globalsCollector.preCollection();
- context.searcher().search(query, globalsCollector);
+ context.searcher().search(query, collector);
} catch (Exception e) {
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
} finally {
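
InternalProfileCollector (one of the new profile classes in the diffstat) is a decorator over a Lucene Collector: it delegates every call and measures it. The sketch below is a simplified, hypothetical timing decorator that illustrates the shape, not the actual class body; it compiles against the Lucene 5.x Collector interface used here:

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;

// wraps another Collector and accumulates the wall-clock time spent in it
class TimingCollector implements Collector {
    private final Collector in;
    private long nanos;

    TimingCollector(Collector in) {
        this.in = in;
    }

    @Override
    public LeafCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
        long start = System.nanoTime();
        try {
            return in.getLeafCollector(ctx); // real profiling would also wrap the returned LeafCollector
        } finally {
            nanos += System.nanoTime() - start;
        }
    }

    @Override
    public boolean needsScores() {
        return in.needsScores();
    }

    long totalNanos() {
        return nanos;
    }
}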
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
index ee38e2b361..c1c1bff1ad 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
@@ -25,6 +25,7 @@ import org.apache.lucene.search.Collector;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.stream.StreamSupport;
@@ -99,6 +100,11 @@ public abstract class BucketCollector implements Collector {
}
return false;
}
+
+ @Override
+ public String toString() {
+ return Arrays.toString(collectors);
+ }
};
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
index 6d9a1edc71..0678338fcf 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.children;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.*;
@@ -64,9 +65,6 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
private final LongObjectPagedHashMap<long[]> parentOrdToOtherBuckets;
private boolean multipleBucketsPerParentOrd = false;
- // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context)
- private Set<LeafReaderContext> replay = new LinkedHashSet<>();
-
public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext,
Aggregator parent, String parentType, Query childFilter, Query parentFilter,
ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource,
@@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
- if (replay == null) {
- throw new IllegalStateException();
- }
final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
assert globalOrdinals != null;
Scorer parentScorer = parentFilter.scorer(ctx);
final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer);
- if (childFilter.scorer(ctx) != null) {
- replay.add(ctx);
- }
return new LeafBucketCollector() {
@Override
@@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
@Override
protected void doPostCollection() throws IOException {
- final Set<LeafReaderContext> replay = this.replay;
- this.replay = null;
-
- for (LeafReaderContext ctx : replay) {
+ IndexReader indexReader = context().searchContext().searcher().getIndexReader();
+ for (LeafReaderContext ctx : indexReader.leaves()) {
DocIdSetIterator childDocsIter = childFilter.scorer(ctx);
if (childDocsIter == null) {
continue;
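
The deleted replay set was only a record of which segments contained child matches. Walking every leaf of the reader in doPostCollection and skipping segments whose child filter yields a null scorer is equivalent and stateless, which is what the new loop does; a condensed view of it, with the surrounding aggregator state assumed in scope:

// iterate all segments; segments without child matches yield a null scorer
IndexReader indexReader = context().searchContext().searcher().getIndexReader();
for (LeafReaderContext ctx : indexReader.leaves()) {
    DocIdSetIterator childDocsIter = childFilter.scorer(ctx);
    if (childDocsIter == null) {
        continue; // no child docs in this segment
    }
    // ... replay collected parent buckets against this segment
}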
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index da3bc286ff..faca359d76 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -23,7 +23,6 @@ import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.Rounding;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
@@ -151,7 +150,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (formatter != ValueFormatter.RAW) {
- Text keyTxt = new StringText(formatter.format(key));
+ Text keyTxt = new Text(formatter.format(key));
if (keyed) {
builder.startObject(keyTxt.string());
} else {
@@ -392,12 +391,14 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
return reducedBuckets;
}
- private void addEmptyBuckets(List<B> list) {
+ private void addEmptyBuckets(List<B> list, ReduceContext reduceContext) {
B lastBucket = null;
ExtendedBounds bounds = emptyBucketInfo.bounds;
ListIterator<B> iter = list.listIterator();
// first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
+ InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),
+ reduceContext);
if (bounds != null) {
B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
if (firstBucket == null) {
@@ -405,7 +406,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = bounds.min;
long max = bounds.max;
while (key <= max) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs,
+ keyed, formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -414,7 +417,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = bounds.min;
if (key < firstBucket.key) {
while (key < firstBucket.key) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs,
+ keyed, formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -429,7 +434,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
if (lastBucket != null) {
long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
while (key < nextBucket.key) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs, keyed,
+ formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
assert key == nextBucket.key;
@@ -442,7 +449,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
long max = bounds.max;
while (key <= max) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs, keyed,
+ formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -454,7 +463,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
// adding empty buckets if needed
if (minDocCount == 0) {
- addEmptyBuckets(reducedBuckets);
+ addEmptyBuckets(reducedBuckets, reduceContext);
}
if (order == InternalOrder.KEY_ASC) {
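
Two things change in the empty-bucket path: the synthetic buckets now receive reduced sub-aggregations (previously the unreduced prototype was inserted directly), and that reduction is hoisted out of the insertion loops so it runs once per reduce rather than once per bucket. The shape of the hoisting, condensed from the hunks above:

// hoisted: reduce the prototype empty sub-aggregations once...
InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(
        Collections.singletonList(emptyBucketInfo.subAggregations), reduceContext);
// ...then reuse the same reduced instance for every synthetic empty bucket
while (key <= max) {
    iter.add(getFactory().createBucket(key, 0, reducedEmptySubAggs, keyed, formatter));
    key = emptyBucketInfo.rounding.nextRoundingValue(key);
}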
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
index 046ca717b9..b6d1d56d07 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
@@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -82,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
@Override
public void initialize(InternalAggregation.ReduceContext context) {
- searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context);
+ searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap());
searchScript.setNextVar("_subset_freq", subsetDfHolder);
searchScript.setNextVar("_subset_size", subsetSizeHolder);
searchScript.setNextVar("_superset_freq", supersetDfHolder);
@@ -170,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
}
ExecutableScript searchScript;
try {
- searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context);
+ searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap());
} catch (Exception e) {
throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
index 26c2eee2f6..c270517cd9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> {
@Override
public String getKeyAsString() {
- return String.valueOf(term);
+ return formatter.format(term);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
index d39a0335ac..00c6b6b49b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
@@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -91,7 +92,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
vars.putAll(firstAggregation.reduceScript.getParams());
}
CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript,
- ScriptContext.Standard.AGGS, reduceContext);
+ ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars);
aggregation = script.run();
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
index 2c1caaa524..6603c6289b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
@@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -58,11 +59,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
this.params = params;
ScriptService scriptService = context.searchContext().scriptService();
if (initScript != null) {
- scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run();
+ scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()).run();
}
- this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS);
+ this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, Collections.emptyMap());
if (combineScript != null) {
- this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext());
+ this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap());
} else {
this.combineScript = null;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
index 32b5d7390d..1efd4a7cd2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.metrics.tophits;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
@@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder {
return sourceBuilder;
}
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
index 789f8c961a..e5ccbf6971 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
@@ -90,7 +90,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
- CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext);
+ CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
index 669a223b21..edc3b4e87c 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
@@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -88,7 +89,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
- CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext);
+ CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
index 506c9d16d7..a9dcc77ee9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
@@ -43,6 +43,7 @@ import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTimeZone;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -227,7 +228,7 @@ public class ValuesSourceParser<VS extends ValuesSource> {
}
private SearchScript createScript() {
- return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS);
+ return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS, Collections.emptyMap());
}
private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) {
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 7963b678fb..3ea2d604b8 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -22,6 +22,7 @@ package org.elasticsearch.search.builder;
import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.Version;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
@@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public static final ParseField RESCORE_FIELD = new ParseField("rescore");
public static final ParseField STATS_FIELD = new ParseField("stats");
public static final ParseField EXT_FIELD = new ParseField("ext");
+ public static final ParseField PROFILE_FIELD = new ParseField("profile");
private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder();
@@ -144,7 +146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private List<BytesReference> aggregations;
- private BytesReference highlightBuilder;
+ private HighlightBuilder highlightBuilder;
private BytesReference suggestBuilder;
@@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private BytesReference ext = null;
+ private boolean profile = false;
+
+
/**
* Constructs a new search source builder.
*/
@@ -405,22 +410,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* Adds highlight to perform as part of the search.
*/
public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) {
- try {
- XContentBuilder builder = XContentFactory.jsonBuilder();
- builder.startObject();
- highlightBuilder.innerXContent(builder);
- builder.endObject();
- this.highlightBuilder = builder.bytes();
- return this;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
+ this.highlightBuilder = highlightBuilder;
+ return this;
}
/**
- * Gets the bytes representing the hightlighter builder for this request.
+ * Gets the highlighter builder for this request.
*/
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return highlightBuilder;
}
@@ -484,6 +481,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
/**
+ * Should the query be profiled. Defaults to <tt>false</tt>
+ */
+ public SearchSourceBuilder profile(boolean profile) {
+ this.profile = profile;
+ return this;
+ }
+
+ /**
+ * Return whether to profile query execution. Defaults to <tt>false</tt>.
+ */
+ public boolean profile() {
+ return profile;
+ }
+
+ /**
* Gets the bytes representing the rescore builders for this request.
*/
public List<BytesReference> rescores() {
@@ -731,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.fieldNames = fieldNames;
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
builder.sort(parser.text());
+ } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
+ builder.profile = parser.booleanValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
@@ -813,8 +828,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.aggregations = aggregations;
} else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
- XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- builder.highlightBuilder = xContentBuilder.bytes();
+ builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
} else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.innerHitsBuilder = xContentBuilder.bytes();
@@ -940,6 +954,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.field(EXPLAIN_FIELD.getPreferredName(), explain);
}
+ if (profile) {
+ builder.field("profile", true);
+ }
+
if (fetchSourceContext != null) {
builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext);
}
@@ -1012,10 +1030,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
if (highlightBuilder != null) {
- builder.field(HIGHLIGHT_FIELD.getPreferredName());
- XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder);
- parser.nextToken();
- builder.copyCurrentStructure(parser);
+ this.highlightBuilder.toXContent(builder, params);
}
if (innerHitsBuilder != null) {
@@ -1158,7 +1173,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.from = in.readVInt();
if (in.readBoolean()) {
- builder.highlightBuilder = in.readBytesReference();
+ builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in);
}
boolean hasIndexBoost = in.readBoolean();
if (hasIndexBoost) {
@@ -1224,6 +1239,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
if (in.readBoolean()) {
builder.ext = in.readBytesReference();
}
+ if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
+ builder.profile = in.readBoolean();
+ } else {
+ builder.profile = false;
+ }
return builder;
}
@@ -1259,7 +1279,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
boolean hasHighlightBuilder = highlightBuilder != null;
out.writeBoolean(hasHighlightBuilder);
if (hasHighlightBuilder) {
- out.writeBytesReference(highlightBuilder);
+ highlightBuilder.writeTo(out);
}
boolean hasIndexBoost = indexBoost != null;
out.writeBoolean(hasIndexBoost);
@@ -1337,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
if (hasExt) {
out.writeBytesReference(ext);
}
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ out.writeBoolean(profile);
+ }
}
@Override
public int hashCode() {
return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from,
highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
- size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version);
+ size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile);
}
@Override
@@ -1376,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
&& Objects.equals(terminateAfter, other.terminateAfter)
&& Objects.equals(timeoutInMillis, other.timeoutInMillis)
&& Objects.equals(trackScores, other.trackScores)
- && Objects.equals(version, other.version);
+ && Objects.equals(version, other.version)
+ && Objects.equals(profile, other.profile);
}
}
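
Taken together, these SearchSourceBuilder changes make profile a first-class flag: settable on the builder, parsed from and written to XContent, included in equals/hashCode, and serialized on the wire only when both nodes are on V_2_2_0 or later. A hedged usage sketch (assuming the standard QueryBuilders helper):

// request profiling for a search; the flag defaults to false
SearchSourceBuilder source = new SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .profile(true);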
diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
index f76527163c..835e6e7142 100644
--- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
+++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
@@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.profile.ProfileShardResult;
import java.io.IOException;
import java.util.ArrayList;
@@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent {
}
}
+ //Collect profile results
+ InternalProfileShardResults shardResults = null;
+ if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
+ Map<String, List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size());
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ String key = entry.value.queryResult().shardTarget().toString();
+ profileResults.put(key, entry.value.queryResult().profileResults());
+ }
+ shardResults = new InternalProfileShardResults(profileResults);
+ }
+
if (aggregations != null) {
List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
if (pipelineAggregators != null) {
@@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent {
InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
- return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly);
+ return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 227141e4dd..04890700be 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
@@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase {
DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
Text typeText;
if (documentMapper == null) {
- typeText = new StringAndBytesText(fieldsVisitor.uid().type());
+ typeText = new Text(fieldsVisitor.uid().type());
} else {
typeText = documentMapper.typeText();
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
index 7941e17775..2e76a4c370 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
@@ -20,7 +20,6 @@
package org.elasticsearch.search.fetch.innerhits;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
@@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent {
return this;
}
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
index 6dbdcbd589..de1703b5c9 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
@@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.internal.SearchContext;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -97,9 +98,9 @@ public class ScriptFieldsParseElement implements SearchParseElement {
throw new SearchParseException(context, "must specify a script in script fields", parser.getTokenLocation());
}
- SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH);
+ SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException));
}
}
}
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
index d30144f777..b4de465cc7 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
@@ -22,13 +22,19 @@ package org.elasticsearch.search.highlight;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.highlight.HighlightBuilder.Order;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -74,7 +80,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
protected QueryBuilder<?> highlightQuery;
- protected String order;
+ protected Order order;
protected Boolean highlightFilter;
@@ -213,18 +219,26 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
/**
* The order of fragments per field. By default, ordered by the order in the
* highlighted text. Can be <tt>score</tt>, which then it will be ordered
- * by score of the fragments.
+ * by score of the fragments, or <tt>none</tt>.
*/
- @SuppressWarnings("unchecked")
public HB order(String order) {
- this.order = order;
+ return order(Order.fromString(order));
+ }
+
+ /**
+ * By default, fragments of a field are ordered by the order in the highlighted text.
+ * If set to {@link Order#SCORE}, this changes order to score of the fragments.
+ */
+ @SuppressWarnings("unchecked")
+ public HB order(Order scoreOrdered) {
+ this.order = scoreOrdered;
return (HB) this;
}
/**
- * @return the value set by {@link #order(String)}
+ * @return the value set by {@link #order(Order)}
*/
- public String order() {
+ public Order order() {
return this.order;
}
@@ -391,7 +405,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery);
}
if (order != null) {
- builder.field(ORDER_FIELD.getPreferredName(), order);
+ builder.field(ORDER_FIELD.getPreferredName(), order.toString());
}
if (highlightFilter != null) {
builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter);
@@ -419,6 +433,100 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
}
}
+ /**
+ * Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext}
+ * in {@link org.elasticsearch.common.xcontent.XContent} format
+ *
+ * @param parseContext containing the parser positioned at the structure to be parsed from.
+ * the state on the parser contained in this context will be changed as a side effect of this
+ * method call
+ * @return the new {@link AbstractHighlighterBuilder}
+ */
+ public HB fromXContent(QueryParseContext parseContext) throws IOException {
+ XContentParser parser = parseContext.parser();
+ XContentParser.Token token = parser.currentToken();
+ String currentFieldName = null;
+ HB highlightBuilder = createInstance(parser);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
+ List<String> preTagsList = new ArrayList<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ preTagsList.add(parser.text());
+ }
+ highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
+ List<String> postTagsList = new ArrayList<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ postTagsList.add(parser.text());
+ }
+ highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
+ }
+ } else if (token.isValue()) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
+ highlightBuilder.order(Order.fromString(parser.text()));
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
+ highlightBuilder.highlightFilter(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
+ highlightBuilder.fragmentSize(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
+ highlightBuilder.numOfFragments(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
+ highlightBuilder.requireFieldMatch(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
+ highlightBuilder.boundaryMaxScan(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
+ highlightBuilder.boundaryChars(parser.text().toCharArray());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
+ highlightBuilder.highlighterType(parser.text());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
+ highlightBuilder.fragmenter(parser.text());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
+ highlightBuilder.noMatchSize(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
+ highlightBuilder.forceSource(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
+ highlightBuilder.phraseLimit(parser.intValue());
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
+ }
+ } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
+ highlightBuilder.options(parser.map());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
+ highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
+ }
+ } else if (currentFieldName != null) {
+ throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
+ }
+ }
+
+ if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
+ throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
+ }
+ return highlightBuilder;
+ }
+
+ /**
+ * @param parser the input parser. Implementing classes might advance the parser depending on the
+ * information they need to instantiate a new instance
+ * @return a new instance
+ */
+ protected abstract HB createInstance(XContentParser parser) throws IOException;
+
+ /**
+ * Implementing subclasses can handle parsing special options depending on the
+ * current token, field name and the parse context.
+ * @return <tt>true</tt> if an option was found and successfully parsed, otherwise <tt>false</tt>
+ */
+ protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token currentToken) throws IOException;
+
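Taken together, the parsing loop above and the two hooks below form a template method: the base class consumes the options common to all highlighters, and a subclass claims anything extra by returning true from doFromXContent. A minimal hypothetical sketch (MySubBuilder and MY_OPTION_FIELD are invented for illustration; the other abstract members of AbstractHighlighterBuilder are elided):

    // Sketch only: not part of this diff.
    public class MySubBuilder extends AbstractHighlighterBuilder<MySubBuilder> {
        private static final ParseField MY_OPTION_FIELD = new ParseField("my_option");

        @Override
        protected MySubBuilder createInstance(XContentParser parser) {
            return new MySubBuilder(); // nothing needed from the parser here
        }

        @Override
        protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName,
                XContentParser.Token currentToken) throws IOException {
            // claim only the tokens the base class does not understand
            if (currentToken.isValue()
                    && parseContext.parseFieldMatcher().match(currentFieldName, MY_OPTION_FIELD)) {
                // consume parseContext.parser().text() as needed ...
                return true;
            }
            return false; // let fromXContent() raise its ParsingException
        }
        // doHashCode(), equality and serialization members elided
    }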
@Override
public final int hashCode() {
return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize,
@@ -480,7 +588,9 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (in.readBoolean()) {
highlightQuery(in.readQuery());
}
- order(in.readOptionalString());
+ if (in.readBoolean()) {
+ order(Order.PROTOTYPE.readFrom(in));
+ }
highlightFilter(in.readOptionalBoolean());
forceSource(in.readOptionalBoolean());
boundaryMaxScan(in.readOptionalVInt());
@@ -511,7 +621,11 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (hasQuery) {
out.writeQuery(highlightQuery);
}
- out.writeOptionalString(order);
+ boolean hasSetOrder = order != null;
+ out.writeBoolean(hasSetOrder);
+ if (hasSetOrder) {
+ order.writeTo(out);
+ }
out.writeOptionalBoolean(highlightFilter);
out.writeOptionalBoolean(forceSource);
out.writeOptionalVInt(boundaryMaxScan);
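The order change above replaces an optional string on the wire with a presence flag followed by the enum's own serialization; reader and writer must stay symmetric. The idiom in isolation (a sketch of the code in this diff, not new API):

    // writer: one boolean, then the payload only if present
    boolean hasSetOrder = order != null;
    out.writeBoolean(hasSetOrder);
    if (hasSetOrder) {
        order.writeTo(out);          // writes the vInt ordinal
    }
    // reader: consume exactly the same bytes, in the same order
    if (in.readBoolean()) {
        order(Order.PROTOTYPE.readFrom(in));
    }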
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
index 9e86edef47..8ad24b5cb1 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
@@ -82,7 +82,7 @@ public final class CustomQueryScorer extends QueryScorer {
} else if (query instanceof FiltersFunctionScoreQuery) {
query = ((FiltersFunctionScoreQuery) query).getSubQuery();
extract(query, query.getBoost(), terms);
- } else {
+ } else if (terms.isEmpty()) {
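+ // fall back to generic weighted-term extraction only when
+ // nothing has been collected from the query tree yet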
extractWeightedTerms(terms, query, query.getBoost());
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
index 65702dd24b..b57899b2e1 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
@@ -33,7 +33,7 @@ import org.apache.lucene.search.vectorhighlight.SimpleFieldFragList;
import org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder;
import org.apache.lucene.search.vectorhighlight.SingleFragListBuilder;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -159,7 +159,7 @@ public class FastVectorHighlighter implements Highlighter {
}
if (fragments != null && fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter {
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(),
fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
if (fragments != null && fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
index e45303ccb5..c0b1aeea3b 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
@@ -30,11 +30,13 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -42,6 +44,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
+import java.util.Locale;
import java.util.Objects;
import java.util.Set;
@@ -230,117 +233,45 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
}
/**
- * Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext}
- * in {@link org.elasticsearch.common.xcontent.XContent} format
- *
- * @param parseContext
- * the input parse context. The state on the parser contained in
- * this context will be changed as a side effect of this method
- * call
- * @return the new {@link HighlightBuilder}
+ * Parses options only present in the top-level highlight builder (`tags_schema`, `encoder` and nested `fields`)
*/
- public static HighlightBuilder fromXContent(QueryParseContext parseContext) throws IOException {
+ @Override
+ protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token;
- String topLevelFieldName = null;
-
- HighlightBuilder highlightBuilder = new HighlightBuilder();
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- topLevelFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_ARRAY) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, PRE_TAGS_FIELD)) {
- List<String> preTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- preTagsList.add(parser.text());
- }
- highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) {
- List<String> postTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- postTagsList.add(parser.text());
- }
- highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
- highlightBuilder.useExplicitFieldOrder(true);
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- if (token == XContentParser.Token.START_OBJECT) {
- String highlightFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- if (highlightFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
- }
- highlightFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_OBJECT) {
- highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
- }
- }
- } else {
- throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
- }
- }
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName);
- }
- } else if (token.isValue()) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) {
- highlightBuilder.order(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) {
- highlightBuilder.tagsSchema(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) {
- highlightBuilder.highlightFilter(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) {
- highlightBuilder.fragmentSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
- highlightBuilder.numOfFragments(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) {
- highlightBuilder.encoder(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
- highlightBuilder.requireFieldMatch(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
- highlightBuilder.boundaryMaxScan(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) {
- highlightBuilder.boundaryChars(parser.text().toCharArray());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) {
- highlightBuilder.highlighterType(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) {
- highlightBuilder.fragmenter(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) {
- highlightBuilder.noMatchSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) {
- highlightBuilder.forceSource(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) {
- highlightBuilder.phraseLimit(parser.intValue());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName);
- }
- } else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) {
- highlightBuilder.options(parser.map());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
- String highlightFieldName = null;
+ boolean foundCurrentFieldMatch = false;
+ if (currentToken.isValue()) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, TAGS_SCHEMA_FIELD)) {
+ tagsSchema(parser.text());
+ foundCurrentFieldMatch = true;
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, ENCODER_FIELD)) {
+ encoder(parser.text());
+ foundCurrentFieldMatch = true;
+ }
+ } else if (currentToken == Token.START_ARRAY && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
+ useExplicitFieldOrder(true);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
- highlightFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_OBJECT) {
- highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
+ field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext));
}
}
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) {
- highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
+ foundCurrentFieldMatch = true;
} else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName);
+ throw new ParsingException(parser.getTokenLocation(),
+ "If highlighter fields is an array it must contain objects containing a single field");
}
- } else if (topLevelFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName);
}
+ } else if (currentToken == Token.START_OBJECT && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext));
+ }
+ }
+ foundCurrentFieldMatch = true;
}
-
- if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
- throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
- }
- return highlightBuilder;
+ return foundCurrentFieldMatch;
}
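As a hedged usage sketch, the options handled by this override correspond to the top-level builder API; each call below appears elsewhere in this diff, but the combination is illustrative only:

    HighlightBuilder highlight = new HighlightBuilder();
    highlight.tagsSchema("styled");        // parsed by doFromXContent above
    highlight.encoder("html");             // likewise top-level only
    highlight.useExplicitFieldOrder(true); // set when `fields` arrives as an array
    highlight.field(new HighlightBuilder.Field("title"));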
public SearchContextHighlight build(QueryShardContext context) throws IOException {
@@ -378,9 +309,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void transferOptions(AbstractHighlighterBuilder highlighterBuilder, SearchContextHighlight.FieldOptions.Builder targetOptionsBuilder, QueryShardContext context) throws IOException {
- targetOptionsBuilder.preTags(highlighterBuilder.preTags);
- targetOptionsBuilder.postTags(highlighterBuilder.postTags);
- targetOptionsBuilder.scoreOrdered("score".equals(highlighterBuilder.order));
+ if (highlighterBuilder.preTags != null) {
+ targetOptionsBuilder.preTags(highlighterBuilder.preTags);
+ }
+ if (highlighterBuilder.postTags != null) {
+ targetOptionsBuilder.postTags(highlighterBuilder.postTags);
+ }
+ if (highlighterBuilder.order != null) {
+ targetOptionsBuilder.scoreOrdered(highlighterBuilder.order == Order.SCORE);
+ }
if (highlighterBuilder.highlightFilter != null) {
targetOptionsBuilder.highlightFilter(highlighterBuilder.highlightFilter);
}
@@ -396,9 +333,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
if (highlighterBuilder.boundaryMaxScan != null) {
targetOptionsBuilder.boundaryMaxScan(highlighterBuilder.boundaryMaxScan);
}
- targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
- targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
- targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+ if (highlighterBuilder.boundaryChars != null) {
+ targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
+ }
+ if (highlighterBuilder.highlighterType != null) {
+ targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
+ }
+ if (highlighterBuilder.fragmenter != null) {
+ targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+ }
if (highlighterBuilder.noMatchSize != null) {
targetOptionsBuilder.noMatchSize(highlighterBuilder.noMatchSize);
}
@@ -408,7 +351,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
if (highlighterBuilder.phraseLimit != null) {
targetOptionsBuilder.phraseLimit(highlighterBuilder.phraseLimit);
}
- targetOptionsBuilder.options(highlighterBuilder.options);
+ if (highlighterBuilder.options != null) {
+ targetOptionsBuilder.options(highlighterBuilder.options);
+ }
if (highlighterBuilder.highlightQuery != null) {
targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context));
}
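The null guards added throughout transferOptions() give the merge its inheritance semantics: a field-level builder only overrides what it explicitly set, and everything else falls through to the global defaults. A hedged sketch of the effect (builder calls as used elsewhere in this diff):

    HighlightBuilder global = new HighlightBuilder();
    global.preTags("<em>");                // global default
    global.postTags("</em>");
    HighlightBuilder.Field body = new HighlightBuilder.Field("body");
    body.fragmentSize(50);                 // overrides only the fragment size
    global.field(body);                    // body keeps the global tags, since
                                           // its own preTags/postTags stay null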
@@ -468,6 +413,11 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
}
@Override
+ protected HighlightBuilder createInstance(XContentParser parser) {
+ return new HighlightBuilder();
+ }
+
+ @Override
protected int doHashCode() {
return Objects.hash(encoder, useExplicitFieldOrder, fields);
}
@@ -549,80 +499,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
builder.endObject();
}
- private static HighlightBuilder.Field fromXContent(String fieldname, QueryParseContext parseContext) throws IOException {
+ /**
+ * Parses options only present in the field highlight builder (`fragment_offset`, `matched_fields`)
+ */
+ @Override
+ protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException {
XContentParser parser = parseContext.parser();
- XContentParser.Token token;
-
- final HighlightBuilder.Field field = new HighlightBuilder.Field(fieldname);
- String currentFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_ARRAY) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
- List<String> preTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- preTagsList.add(parser.text());
- }
- field.preTags(preTagsList.toArray(new String[preTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
- List<String> postTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- postTagsList.add(parser.text());
- }
- field.postTags(postTagsList.toArray(new String[postTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) {
- List<String> matchedFields = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- matchedFields.add(parser.text());
- }
- field.matchedFields(matchedFields.toArray(new String[matchedFields.size()]));
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
- }
- } else if (token.isValue()) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
- field.fragmentSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
- field.numOfFragments(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) {
- field.fragmentOffset(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
- field.highlightFilter(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
- field.order(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
- field.requireFieldMatch(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
- field.boundaryMaxScan(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
- field.boundaryChars(parser.text().toCharArray());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
- field.highlighterType(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
- field.fragmenter(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
- field.noMatchSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
- field.forceSource(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
- field.phraseLimit(parser.intValue());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
- }
- } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
- field.highlightQuery(parseContext.parseInnerQueryBuilder());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
- field.options(parser.map());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
- }
- } else if (currentFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
+ boolean foundCurrentFieldMatch = false;
+ if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD) && currentToken.isValue()) {
+ fragmentOffset(parser.intValue());
+ foundCurrentFieldMatch = true;
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)
+ && currentToken == XContentParser.Token.START_ARRAY) {
+ List<String> matchedFields = new ArrayList<>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ matchedFields.add(parser.text());
}
+ matchedFields(matchedFields.toArray(new String[matchedFields.size()]));
+ foundCurrentFieldMatch = true;
+ }
+ return foundCurrentFieldMatch;
+ }
+
+ @Override
+ protected Field createInstance(XContentParser parser) throws IOException {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ String fieldname = parser.currentName();
+ return new Field(fieldname);
+ } else {
+ throw new ParsingException(parser.getTokenLocation(), "unknown token type [{}], expected field name", parser.currentToken());
}
- return field;
}
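A hedged sketch of the two field-only options this override parses; note that matched_fields in practice also requires the relevant fields to be indexed with term vectors, which is outside this diff:

    HighlightBuilder.Field content = new HighlightBuilder.Field("content");
    content.fragmentOffset(0);                          // field-level only
    content.matchedFields("content", "content.plain"); // field-level only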
@Override
@@ -654,4 +560,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
writeOptionsTo(out);
}
}
+
+ public enum Order implements Writeable<Order> {
+ NONE, SCORE;
+
+ static Order PROTOTYPE = NONE;
+
+ @Override
+ public Order readFrom(StreamInput in) throws IOException {
+ int ordinal = in.readVInt();
+ if (ordinal < 0 || ordinal >= values().length) {
+ throw new IOException("Unknown Order ordinal [" + ordinal + "]");
+ }
+ return values()[ordinal];
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(this.ordinal());
+ }
+
+ public static Order fromString(String order) {
+ if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) {
+ return Order.SCORE;
+ }
+ return NONE;
+ }
+
+ @Override
+ public String toString() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+ }
}
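A short round trip for the new Order enum, matching the parsing and wire format defined above (sketch; the stream halves are summarized in comments):

    Order order = Order.fromString("SCORE");    // case-insensitive -> Order.SCORE
    assert Order.fromString("none") == Order.NONE;
    assert order.toString().equals("score");    // toString() lower-cases the name
    // on the wire: writeTo() emits the vInt ordinal, and
    // Order.PROTOTYPE.readFrom(in) rejects out-of-range ordinals with an IOException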
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
index 9077278d51..30530b697f 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
@@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import java.io.IOException;
@@ -90,7 +89,7 @@ public class HighlightField implements Streamable {
if (in.readBoolean()) {
int size = in.readVInt();
if (size == 0) {
- fragments = StringText.EMPTY_ARRAY;
+ fragments = Text.EMPTY_ARRAY;
} else {
fragments = new Text[size];
for (int i = 0; i < size; i++) {
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
index 041ed754d7..5f4cdddb06 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
@@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CollectionUtil;
-import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
@@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter {
}
if (fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
if (end > 0) {
- return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) });
+ return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
}
}
return null;
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
index e11840e89e..2509f95da5 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
@@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;
import org.apache.lucene.search.postingshighlight.Snippet;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter {
}
if (fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
return null;
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
index 0a9b860edb..a7bacb64d9 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
@@ -26,6 +26,9 @@ import org.apache.lucene.search.*;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.profile.ProfileBreakdown;
+import org.elasticsearch.search.profile.ProfileWeight;
+import org.elasticsearch.search.profile.Profiler;
import java.io.IOException;
@@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
private final Engine.Searcher engineSearcher;
- public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
+ // TODO revisit moving the profiler to inheritance or wrapping model in the future
+ private Profiler profiler;
+
+ public ContextIndexSearcher(Engine.Searcher searcher,
+ QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) {
super(searcher.reader());
in = searcher.searcher();
engineSearcher = searcher;
setSimilarity(searcher.searcher().getSimilarity(true));
- setQueryCache(searchContext.getQueryCache());
- setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy());
+ setQueryCache(queryCache);
+ setQueryCachingPolicy(queryCachingPolicy);
}
@Override
public void close() {
}
+ public void setProfiler(Profiler profiler) {
+ this.profiler = profiler;
+ }
+
public void setAggregatedDfs(AggregatedDfs aggregatedDfs) {
this.aggregatedDfs = aggregatedDfs;
}
@Override
public Query rewrite(Query original) throws IOException {
- return in.rewrite(original);
+ if (profiler != null) {
+ profiler.startRewriteTime();
+ }
+
+ try {
+ return in.rewrite(original);
+ } finally {
+ if (profiler != null) {
+ profiler.stopAndAddRewriteTime();
+ }
+ }
}
@Override
@@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
if (aggregatedDfs != null && needsScores) {
// if scores are needed and we have dfs data then use it
return super.createNormalizedWeight(query, needsScores);
+ } else if (profiler != null) {
+ // we need to use the createWeight method to insert the wrappers
+ return super.createNormalizedWeight(query, needsScores);
+ } else {
+ return in.createNormalizedWeight(query, needsScores);
+ }
+ }
+
+ @Override
+ public Weight createWeight(Query query, boolean needsScores) throws IOException {
+ if (profiler != null) {
+ // createWeight() is called for each query in the tree, so we tell the queryProfiler
+ // each invocation so that it can build an internal representation of the query
+ // tree
+ ProfileBreakdown profile = profiler.getQueryBreakdown(query);
+ profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT);
+ final Weight weight;
+ try {
+ weight = super.createWeight(query, needsScores);
+ } finally {
+ profile.stopAndRecordTime();
+ profiler.pollLastQuery();
+ }
+ return new ProfileWeight(query, weight, profile);
+ } else {
+ // needs to be 'super', not 'in' in order to use aggregated DFS
+ return super.createWeight(query, needsScores);
}
- return in.createNormalizedWeight(query, needsScores);
}
@Override
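The profiling hooks above are opt-in: with no profiler set, rewrite() and createWeight() take the untimed paths. A hedged usage sketch (engineSearcher, queryCache, cachingPolicy, profiler and query are assumptions standing in for objects from the search context):

    ContextIndexSearcher searcher =
            new ContextIndexSearcher(engineSearcher, queryCache, cachingPolicy);
    searcher.setProfiler(profiler);            // null keeps the fast path
    Query rewritten = searcher.rewrite(query); // rewrite time is now recorded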
diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
index 1174fcdd8a..2d3f659062 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
@@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext {
private List<RescoreSearchContext> rescore;
private SearchLookup searchLookup;
private volatile long keepAlive;
- private ScoreDoc lastEmittedDoc;
private final long originNanoTime = System.nanoTime();
private volatile long lastAccessTime = -1;
private InnerHitsContext innerHitsContext;
+ private Profilers profilers;
private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
private final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
@@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext {
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
- this.searcher = new ContextIndexSearcher(this, engineSearcher);
+ this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
this.timeEstimateCounter = timeEstimateCounter;
this.timeoutInMillis = timeout.millis();
}
@@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
- public QueryCache getQueryCache() { return indexService.cache().query();}
+ public Profilers getProfilers() {
+ return profilers;
+ }
+
+ public void setProfilers(Profilers profilers) {
+ this.profilers = profilers;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
index 7225c7b32b..1f04d01340 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
@@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
- public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
+ public Profilers getProfilers() {
+ return in.getProfilers();
+ }
@Override
- public QueryCache getQueryCache() { return in.getQueryCache();}
+ public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
+
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
index 96fd103fa6..fcac5b1cc8 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -104,14 +104,14 @@ public class InternalSearchHit implements SearchHit {
public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
this.docId = docId;
- this.id = new StringAndBytesText(id);
+ this.id = new Text(id);
this.type = type;
this.fields = fields;
}
public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) {
this.docId = nestedTopDocId;
- this.id = new StringAndBytesText(id);
+ this.id = new Text(id);
this.type = type;
this.nestedIdentity = nestedIdentity;
this.fields = fields;
@@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit {
if (sortValues != null) {
for (int i = 0; i < sortValues.length; i++) {
if (sortValues[i] instanceof BytesRef) {
- sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i]));
+ sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i]));
}
}
}
@@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit {
private InternalNestedIdentity child;
public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) {
- this.field = new StringAndBytesText(field);
+ this.field = new Text(field);
this.offset = offset;
this.child = child;
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
index 7b73772f9d..b8255e0bb5 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
+import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;
@@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit
public class InternalSearchResponse implements Streamable, ToXContent {
public static InternalSearchResponse empty() {
- return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null);
+ return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null);
}
private InternalSearchHits hits;
@@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent {
private Suggest suggest;
+ private InternalProfileShardResults profileResults;
+
private boolean timedOut;
private Boolean terminatedEarly = null;
@@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
private InternalSearchResponse() {
}
- public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) {
+ public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest,
+ InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) {
this.hits = hits;
this.aggregations = aggregations;
this.suggest = suggest;
+ this.profileResults = profileResults;
this.timedOut = timedOut;
this.terminatedEarly = terminatedEarly;
}
@@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent {
return suggest;
}
+ /**
+ * Returns the profile results for this search response (including all shards).
+ * An empty map is returned if profiling was not enabled
+ *
+ * @return Profile results
+ */
+ public Map<String, List<ProfileShardResult>> profile() {
+ if (profileResults == null) {
+ return Collections.emptyMap();
+ }
+ return profileResults.getShardResults();
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
hits.toXContent(builder, params);
@@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent {
if (suggest != null) {
suggest.toXContent(builder, params);
}
+ if (profileResults != null) {
+ profileResults.toXContent(builder, params);
+ }
return builder;
}
@@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
timedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
+
+ if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+ profileResults = new InternalProfileShardResults(in);
+ } else {
+ profileResults = null;
+ }
}
@Override
@@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent {
out.writeBoolean(timedOut);
out.writeOptionalBoolean(terminatedEarly);
+
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ if (profileResults == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ profileResults.writeTo(out);
+ }
+ }
}
}
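Two points are worth noting in the serialization above: the profile block is both version-gated (only peers on 2.2.0 or later see it) and preceded by a presence flag, so mixed-version clusters keep a consistent stream. On the read side, profile() never returns null, which keeps call sites unconditional. A hedged consumer sketch (`response` is an assumption):

    for (Map.Entry<String, List<ProfileShardResult>> entry : response.profile().entrySet()) {
        // entry.getKey() is the shard identifier; the list holds one
        // ProfileShardResult per profiled search on that shard
    }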
diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
index 0f61b2bc6a..4e4e9dd5dd 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
@@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -304,6 +304,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
public abstract FetchSearchResult fetchResult();
/**
+ * Return a handle on the profilers for the current search request, or {@code null} if profiling is not enabled.
+ */
+ public abstract Profilers getProfilers();
+
+ /**
* Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object
* is function of the provided {@link Lifetime}.
*/
@@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
CONTEXT
}
- public abstract QueryCache getQueryCache();
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
index 47791aeddf..9d15dfd579 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
@@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
private Boolean requestCache;
private long nowInMillis;
+ private boolean profile;
+
ShardSearchLocalRequest() {
}
@@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
return scroll;
}
+ @Override
+ public void setProfile(boolean profile) {
+ this.profile = profile;
+ }
+
+ @Override
+ public boolean isProfile() {
+ return profile;
+ }
+
@SuppressWarnings("unchecked")
protected void innerReadFrom(StreamInput in) throws IOException {
index = in.readString();
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
index fb631b0827..b1730b6a14 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
@@ -60,6 +60,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders {
Scroll scroll();
/**
+ * Sets whether this shard search should be profiled
+ * @param profile True if the shard should be profiled
+ */
+ void setProfile(boolean profile);
+
+ /**
+ * Returns true if this shard search is being profiled
+ */
+ boolean isProfile();
+
+ /**
* Returns the cache key for this shard search request, based on its content
*/
BytesReference cacheKey() throws IOException;
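A hedged sketch of the new flag's contract; the transport variant below simply delegates to the wrapped local request (`request` is any ShardSearchRequest implementation, an assumption here):

    request.setProfile(true);
    if (request.isProfile()) {
        // the shard executing this request should attach Profilers to its SearchContext
    }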
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
index 279d9d6bd2..0f9c0ced41 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
@@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public BytesReference cacheKey() throws IOException {
return shardSearchLocalRequest.cacheKey();
}
+
+ @Override
+ public void setProfile(boolean profile) {
+ shardSearchLocalRequest.setProfile(profile);
+ }
+
+ @Override
+ public boolean isProfile() {
+ return shardSearchLocalRequest.isProfile();
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java
new file mode 100644
index 0000000000..4949c6388d
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Public interface and serialization container for profiled timings of the
+ * Collectors used in the search. Child CollectorResults may be
+ * embedded inside a parent CollectorResult
+ */
+public class CollectorResult implements ToXContent, Writeable<CollectorResult> {
+
+ public static final String REASON_SEARCH_COUNT = "search_count";
+ public static final String REASON_SEARCH_TOP_HITS = "search_top_hits";
+ public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count";
+ public static final String REASON_SEARCH_POST_FILTER = "search_post_filter";
+ public static final String REASON_SEARCH_MIN_SCORE = "search_min_score";
+ public static final String REASON_SEARCH_MULTI = "search_multi";
+ public static final String REASON_SEARCH_TIMEOUT = "search_timeout";
+ public static final String REASON_AGGREGATION = "aggregation";
+ public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global";
+
+ private static final ParseField NAME = new ParseField("name");
+ private static final ParseField REASON = new ParseField("reason");
+ private static final ParseField TIME = new ParseField("time");
+ private static final ParseField CHILDREN = new ParseField("children");
+
+ /**
+ * A more friendly representation of the Collector's class name
+ */
+ private final String collectorName;
+
+ /**
+ * A "hint" to help provide some context about this Collector
+ */
+ private final String reason;
+
+ /**
+ * The total elapsed time for this Collector
+ */
+ private final Long time;
+
+ /**
+ * A list of children collectors "embedded" inside this collector
+ */
+ private List<CollectorResult> children;
+
+ public CollectorResult(String collectorName, String reason, Long time, List<CollectorResult> children) {
+ this.collectorName = collectorName;
+ this.reason = reason;
+ this.time = time;
+ this.children = children;
+ }
+
+ public CollectorResult(StreamInput in) throws IOException {
+ this.collectorName = in.readString();
+ this.reason = in.readString();
+ this.time = in.readLong();
+ int size = in.readVInt();
+ this.children = new ArrayList<>(size);
+ for (int i = 0; i < size; i++) {
+ CollectorResult child = new CollectorResult(in);
+ this.children.add(child);
+ }
+ }
+
+ /**
+ * @return the profiled time for this collector (inclusive of children)
+ */
+ public long getTime() {
+ return this.time;
+ }
+
+ /**
+ * @return a human readable "hint" about what this collector was used for
+ */
+ public String getReason() {
+ return this.reason;
+ }
+
+ /**
+ * @return the lucene class name of the collector
+ */
+ public String getName() {
+ return this.collectorName;
+ }
+
+ /**
+ * @return a list of children collectors
+ */
+ public List<CollectorResult> getProfiledChildren() {
+ return children;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder = builder.startObject()
+ .field(NAME.getPreferredName(), toString())
+ .field(REASON.getPreferredName(), reason)
+ .field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", getTime() / 1000000.0));
+
+ if (!children.isEmpty()) {
+ builder = builder.startArray(CHILDREN.getPreferredName());
+ for (CollectorResult child : children) {
+ builder = child.toXContent(builder, params);
+ }
+ builder = builder.endArray();
+ }
+ builder = builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(collectorName);
+ out.writeString(reason);
+ out.writeLong(time);
+ out.writeVInt(children.size());
+ for (CollectorResult child : children) {
+ child.writeTo(out);
+ }
+ }
+
+ @Override
+ public CollectorResult readFrom(StreamInput in) throws IOException {
+ return new CollectorResult(in);
+ }
+}
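A hedged construction sketch: children nest to mirror the collector tree, and getTime() on a parent is inclusive of its children (the times below are made up):

    CollectorResult leaf = new CollectorResult("TopScoreDocCollector",
            CollectorResult.REASON_SEARCH_TOP_HITS, 1200L,
            Collections.<CollectorResult>emptyList());
    CollectorResult root = new CollectorResult("MultiCollector",
            CollectorResult.REASON_SEARCH_MULTI, 1500L,
            Collections.singletonList(leaf));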
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java
new file mode 100644
index 0000000000..132731f37c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.LeafCollector;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class wraps a Lucene Collector and times the execution of:
+ * - setScorer()
+ * - collect()
+ * - doSetNextReader()
+ * - needsScores()
+ *
+ * InternalProfiler facilitates the linking of the Collector graph
+ */
+public class InternalProfileCollector implements Collector {
+
+ /**
+ * A more friendly representation of the Collector's class name
+ */
+ private final String collectorName;
+
+ /**
+ * A "hint" to help provide some context about this Collector
+ */
+ private final String reason;
+
+ /** The wrapped collector */
+ private final ProfileCollector collector;
+
+ /**
+ * A list of "embedded" children collectors
+ */
+ private final List<InternalProfileCollector> children;
+
+ public InternalProfileCollector(Collector collector, String reason, List<InternalProfileCollector> children) {
+ this.collector = new ProfileCollector(collector);
+ this.reason = reason;
+ this.collectorName = deriveCollectorName(collector);
+ this.children = children;
+ }
+
+ /**
+ * @return the profiled time for this collector (inclusive of children)
+ */
+ public long getTime() {
+ return collector.getTime();
+ }
+
+ /**
+ * @return a human readable "hint" about what this collector was used for
+ */
+ public String getReason() {
+ return this.reason;
+ }
+
+ /**
+ * @return the lucene class name of the collector
+ */
+ public String getName() {
+ return this.collectorName;
+ }
+
+ /**
+ * Creates a human-friendly representation of the Collector name.
+ *
+ * Bucket Collectors use the aggregation name in their toString() method,
+ * which makes the profiled output a bit nicer.
+ *
+ * @param c The Collector to derive a name from
+ * @return A (hopefully) prettier name
+ */
+ private String deriveCollectorName(Collector c) {
+ String s = c.getClass().getSimpleName();
+
+ // MultiCollector, which wraps multiple BucketCollectors, is generated
+ // via an anonymous class, so this corrects the lack of a name by
+ // asking for the enclosing class
+ if (s.equals("")) {
+ s = c.getClass().getEnclosingClass().getSimpleName();
+ }
+
+ // Aggregation collector toString()'s include the user-defined agg name
+ if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) {
+ s += ": [" + c.toString() + "]";
+ }
+ return s;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ return collector.getLeafCollector(context);
+ }
+
+ @Override
+ public boolean needsScores() {
+ return collector.needsScores();
+ }
+
+ public CollectorResult getCollectorTree() {
+ return InternalProfileCollector.doGetCollectorTree(this);
+ }
+
+ private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) {
+ List<CollectorResult> childResults = new ArrayList<>(collector.children.size());
+ for (InternalProfileCollector child : collector.children) {
+ CollectorResult result = doGetCollectorTree(child);
+ childResults.add(result);
+ }
+ return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults);
+ }
+}
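A hedged wiring sketch: the wrapper stands in for the real collector during the search, then yields the timing tree (`topDocsCollector` is an assumption for whatever collector the search would otherwise use):

    InternalProfileCollector profiled = new InternalProfileCollector(
            topDocsCollector, CollectorResult.REASON_SEARCH_TOP_HITS,
            Collections.<InternalProfileCollector>emptyList());
    // ... run the search with `profiled` in place of topDocsCollector ...
    CollectorResult tree = profiled.getCollectorTree();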
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java
new file mode 100644
index 0000000000..e6052ff509
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * A container class to hold all the profile results across all shards. Internally
+ * holds a map of shard ID -&gt; Profiled results
+ */
+public final class InternalProfileShardResults implements Writeable<InternalProfileShardResults>, ToXContent {
+
+ private Map<String, List<ProfileShardResult>> shardResults;
+
+ public InternalProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) {
+ Map<String, List<ProfileShardResult>> transformed =
+ shardResults.entrySet()
+ .stream()
+ .collect(Collectors.toMap(
+ Map.Entry::getKey,
+ e -> Collections.unmodifiableList(e.getValue()))
+ );
+ this.shardResults = Collections.unmodifiableMap(transformed);
+ }
+
+ public InternalProfileShardResults(StreamInput in) throws IOException {
+ int size = in.readInt();
+ shardResults = new HashMap<>(size);
+
+ for (int i = 0; i < size; i++) {
+ String key = in.readString();
+ int shardResultsSize = in.readInt();
+
+ List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize);
+
+ for (int j = 0; j < shardResultsSize; j++) {
+ ProfileShardResult result = new ProfileShardResult(in);
+ shardResult.add(result);
+ }
+ shardResults.put(key, shardResult);
+ }
+ }
+
+ public Map<String, List<ProfileShardResult>> getShardResults() {
+ return this.shardResults;
+ }
+
+ @Override
+ public InternalProfileShardResults readFrom(StreamInput in) throws IOException {
+ return new InternalProfileShardResults(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeInt(shardResults.size());
+ for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeInt(entry.getValue().size());
+
+ for (ProfileShardResult result : entry.getValue()) {
+ result.writeTo(out);
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("profile").startArray("shards");
+
+ for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
+ builder.startObject().field("id", entry.getKey()).startArray("searches");
+ for (ProfileShardResult result : entry.getValue()) {
+ builder.startObject();
+ result.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray().endObject();
+ }
+
+ builder.endArray().endObject();
+ return builder;
+ }
+}
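For reference, the toXContent() above emits a layout along these lines (a hand-written sketch with placeholder values, not captured output; each object inside "searches" is filled in by ProfileShardResult.toXContent(), shown later in this patch):

    "profile": {
        "shards": [
            {
                "id": "<shard key>",
                "searches": [
                    {
                        "query": [ ... ],       // one ProfileResult tree per root query
                        "rewrite_time": 12345,  // nanoseconds
                        "collector": [ ... ]    // the CollectorResult tree
                    }
                ]
            }
        ]
    }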
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java
new file mode 100644
index 0000000000..4bc8a85a78
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.Query;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This class tracks the dependency tree for queries (scoring and rewriting) and
+ * generates {@link ProfileBreakdown} for each node in the tree. It also finalizes the tree
+ * and returns a list of {@link ProfileResult} that can be serialized back to the client
+ */
+final class InternalProfileTree {
+
+ private ArrayList<ProfileBreakdown> timings;
+
+ /** Maps the Query to its list of children. This is basically the dependency tree */
+ private ArrayList<ArrayList<Integer>> tree;
+
+ /** A list of the original queries, keyed by index position */
+ private ArrayList<Query> queries;
+
+ /** A list of top-level "roots". Each root can have its own tree of profiles */
+ private ArrayList<Integer> roots;
+
+ /** Rewrite time */
+ private long rewriteTime;
+ private long rewriteScratch;
+
+ /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */
+ private Deque<Integer> stack;
+
+ private int currentToken = 0;
+
+ public InternalProfileTree() {
+ timings = new ArrayList<>(10);
+ // an unbounded stack; a capacity-bounded blocking deque would throw on deeply nested queries
+ stack = new ArrayDeque<>(10);
+ tree = new ArrayList<>(10);
+ queries = new ArrayList<>(10);
+ roots = new ArrayList<>(10);
+ }
+
+ /**
+ * Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (i.e. those
+ * that are past the rewrite phase and are now being wrapped by createWeight()) follow
+ * a recursive progression, so we can track the dependency tree with a simple stack.
+ *
+ * The only hiccup is that the first scoring query will be identical to the last rewritten
+ * query, so we need to take special care to fix that.
+ *
+ * @param query The scoring query we wish to profile
+ * @return A ProfileBreakdown for this query
+ */
+ public ProfileBreakdown getQueryBreakdown(Query query) {
+ int token = currentToken;
+
+ boolean stackEmpty = stack.isEmpty();
+
+ // If the stack is empty, we are a new root query
+ if (stackEmpty) {
+
+ // We couldn't find a rewritten query to attach to, so just add it as a
+ // top-level root. This is just a precaution: it really shouldn't happen.
+ // We would only get here if a top-level query never rewrites for some reason.
+ roots.add(token);
+
+ // Increment the token since we are adding a new node, but notably, do not
+ // updateParent() because this was added as a root
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ updateParent(token);
+
+ // Increment the token since we are adding a new node
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ /**
+ * Begin timing the rewrite phase of a query
+ */
+ public void startRewriteTime() {
+ assert rewriteScratch == 0;
+ rewriteScratch = System.nanoTime();
+ }
+
+ /**
+ * Halt the timing process and add the elapsed rewriting time.
+ * startRewriteTime() must be called for a particular context prior to calling
+ * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and
+ * nonsensical
+ *
+ * @return The elapsed time
+ */
+ public long stopAndAddRewriteTime() {
+ long time = Math.max(1, System.nanoTime() - rewriteScratch);
+ rewriteTime += time;
+ rewriteScratch = 0;
+ return time;
+ }
+
+ /**
+ * Helper method to add a new node to the dependency tree.
+ *
+ * Initializes a new list in the dependency tree, saves the query and
+ * generates a new {@link ProfileBreakdown} to track the timings
+ * of this query
+ *
+ * @param query The query to profile
+ * @param token The assigned token for this query
+ * @return A ProfileBreakdown to profile this query
+ */
+ private ProfileBreakdown addDependencyNode(Query query, int token) {
+
+ // Add a new slot in the dependency tree
+ tree.add(new ArrayList<>(5));
+
+ // Save our query for lookup later
+ queries.add(query);
+
+ ProfileBreakdown queryTimings = new ProfileBreakdown();
+ timings.add(token, queryTimings);
+ return queryTimings;
+ }
+
+ /**
+ * Removes the last (i.e. most recent) value on the stack
+ */
+ public void pollLast() {
+ stack.pollLast();
+ }
+
+ /**
+ * After the query has been run and profiled, we need to merge the flat timing map
+ * with the dependency graph to build a data structure that mirrors the original
+ * query tree
+ *
+ * @return a hierarchical representation of the profiled query tree
+ */
+ public List<ProfileResult> getQueryTree() {
+ ArrayList<ProfileResult> results = new ArrayList<>(5);
+ for (Integer root : roots) {
+ results.add(doGetQueryTree(root));
+ }
+ return results;
+ }
+
+ /**
+ * Recursive helper to finalize a node in the dependency tree
+ * @param token The node we are currently finalizing
+ * @return A hierarchical representation of the tree inclusive of children at this level
+ */
+ private ProfileResult doGetQueryTree(int token) {
+ Query query = queries.get(token);
+ ProfileBreakdown breakdown = timings.get(token);
+ Map<String, Long> timingMap = breakdown.toTimingMap(); // local name avoids shadowing the timings field
+ List<Integer> children = tree.get(token);
+ List<ProfileResult> childrenProfileResults = Collections.emptyList();
+
+ if (children != null) {
+ childrenProfileResults = new ArrayList<>(children.size());
+ for (Integer child : children) {
+ ProfileResult childNode = doGetQueryTree(child);
+ childrenProfileResults.add(childNode);
+ }
+ }
+
+ // TODO this would be better done bottom-up instead of top-down to avoid
+ // calculating the same times over and over...but is it worth the effort?
+ long nodeTime = getNodeTime(timingMap, childrenProfileResults);
+ String queryDescription = query.getClass().getSimpleName();
+ String luceneName = query.toString();
+ return new ProfileResult(queryDescription, luceneName, timingMap, childrenProfileResults, nodeTime);
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+
+ /**
+ * Internal helper to add a child to the current parent node
+ *
+ * @param childToken The child to add to the current parent
+ */
+ private void updateParent(int childToken) {
+ Integer parent = stack.peekLast();
+ ArrayList<Integer> parentNode = tree.get(parent);
+ parentNode.add(childToken);
+ tree.set(parent, parentNode);
+ }
+
+ /**
+ * Internal helper to calculate the time of a node, inclusive of children
+ *
+ * @param timings A map of breakdown timing for the node
+ * @param children All children profile results at this node
+ * @return The total time at this node, inclusive of children
+ */
+ private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
+ long nodeTime = 0;
+ for (long time : timings.values()) {
+ nodeTime += time;
+ }
+
+ // Then add up our children
+ for (ProfileResult child : children) {
+ nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
+ }
+ return nodeTime;
+ }
+}
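Because the token/stack bookkeeping above is easy to misread, here is a hand-traced sketch of a two-clause BooleanQuery flowing through it. It assumes the caller (ContextIndexSearcher in this patch) invokes pollLast() as each createWeight() returns; booleanQuery, termQuery1, and termQuery2 are placeholder queries.

    InternalProfileTree profileTree = new InternalProfileTree();

    profileTree.getQueryBreakdown(booleanQuery); // token 0: stack empty -> roots=[0], stack=[0]
    profileTree.getQueryBreakdown(termQuery1);   // token 1: child of 0,  stack=[0, 1]
    profileTree.pollLast();                      // term1 weight built,   stack=[0]
    profileTree.getQueryBreakdown(termQuery2);   // token 2: child of 0,  stack=[0, 2]
    profileTree.pollLast();                      // stack=[0]
    profileTree.pollLast();                      // stack=[]

    List<ProfileResult> tree = profileTree.getQueryTree(); // one root with two children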
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java
new file mode 100644
index 0000000000..55ad77b693
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A record of timings for the various operations that may happen during query execution.
+ * A node's time may be composed of several internal attributes (rewriting, weighting,
+ * scoring, etc).
+ */
+public final class ProfileBreakdown {
+
+ /** Enumeration of all supported timing types. */
+ public enum TimingType {
+ CREATE_WEIGHT,
+ BUILD_SCORER,
+ NEXT_DOC,
+ ADVANCE,
+ MATCH,
+ SCORE;
+
+ @Override
+ public String toString() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+ }
+
+ /**
+ * The accumulated timings for this query node
+ */
+ private final long[] timings;
+
+ /** Scratch to store the current timing type. */
+ private TimingType currentTimingType;
+
+ /**
+ * The temporary scratch space for holding start-times
+ */
+ private long scratch;
+
+ /** Sole constructor. */
+ public ProfileBreakdown() {
+ timings = new long[TimingType.values().length];
+ }
+
+ /**
+ * Begin timing a query for a specific Timing context
+ * @param timing The timing context being profiled
+ */
+ public void startTime(TimingType timing) {
+ assert currentTimingType == null;
+ assert scratch == 0;
+ currentTimingType = timing;
+ scratch = System.nanoTime();
+ }
+
+ /**
+ * Halt the timing process and save the elapsed time.
+ * startTime() must be called for a particular context prior to calling
+ * stopAndRecordTime(), otherwise the elapsed time will be negative and
+ * nonsensical
+ *
+ * @return The elapsed time
+ */
+ public long stopAndRecordTime() {
+ long time = Math.max(1, System.nanoTime() - scratch);
+ timings[currentTimingType.ordinal()] += time;
+ currentTimingType = null;
+ scratch = 0L;
+ return time;
+ }
+
+ /** Convert this record to a map from {@link TimingType} to times. */
+ public Map<String, Long> toTimingMap() {
+ Map<String, Long> map = new HashMap<>();
+ for (TimingType timingType : TimingType.values()) {
+ map.put(timingType.toString(), timings[timingType.ordinal()]);
+ }
+ return Collections.unmodifiableMap(map);
+ }
+
+ /**
+ * Add <code>other</code>'s timings into this breakdown
+ * @param other Another Breakdown to merge with this one
+ */
+ public void merge(ProfileBreakdown other) {
+ assert timings.length == other.timings.length;
+ for (int i = 0; i < timings.length; ++i) {
+ timings[i] += other.timings[i];
+ }
+ }
+}
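The intended calling convention is a start/stop pair around each operation, guarded by try/finally so that a throwing operation still closes its timing window; this is the same pattern ProfileWeight and ProfileScorer use later in this patch. A minimal sketch, with iterator as an assumed DocIdSetIterator:

    ProfileBreakdown breakdown = new ProfileBreakdown();

    breakdown.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
    try {
        int doc = iterator.nextDoc();
    } finally {
        breakdown.stopAndRecordTime();
    }

    // keys are the lowercased enum names, e.g. {"next_doc": 1834, "score": 0, ...}
    Map<String, Long> byOperation = breakdown.toTimingMap();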
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java
new file mode 100644
index 0000000000..7d7538c911
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
+/** A collector that profiles how much time is spent calling it. */
+final class ProfileCollector extends FilterCollector {
+
+ private long time;
+
+ /** Sole constructor. */
+ public ProfileCollector(Collector in) {
+ super(in);
+ }
+
+ /** Return the wrapped collector. */
+ public Collector getDelegate() {
+ return in;
+ }
+
+ @Override
+ public boolean needsScores() {
+ final long start = System.nanoTime();
+ try {
+ return super.needsScores();
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ final long start = System.nanoTime();
+ final LeafCollector inLeafCollector;
+ try {
+ inLeafCollector = super.getLeafCollector(context);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ return new FilterLeafCollector(inLeafCollector) {
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final long start = System.nanoTime();
+ try {
+ super.collect(doc);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ final long start = System.nanoTime();
+ try {
+ super.setScorer(scorer);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+ };
+ }
+
+ /** Return the total time spent on this collector. */
+ public long getTime() {
+ return time;
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
new file mode 100644
index 0000000000..4c8752fdaf
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * This class is the internal representation of a profiled Query, corresponding
+ * to a single node in the query tree. It is built after the query has finished executing
+ * and is merely a structured representation, rather than the entity that collects the timing
+ * profile (see InternalProfileTree for that)
+ *
+ * Each ProfileResult has a List of ProfileResults, which will contain
+ * "children" queries if applicable
+ */
+final class ProfileResult implements Writeable<ProfileResult>, ToXContent {
+
+ private static final ParseField QUERY_TYPE = new ParseField("query_type");
+ private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene");
+ private static final ParseField NODE_TIME = new ParseField("time");
+ private static final ParseField CHILDREN = new ParseField("children");
+ private static final ParseField BREAKDOWN = new ParseField("breakdown");
+
+ private final String queryType;
+ private final String luceneDescription;
+ private final Map<String, Long> timings;
+ private final long nodeTime;
+ private final List<ProfileResult> children;
+
+ public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) {
+ this.queryType = queryType;
+ this.luceneDescription = luceneDescription;
+ this.timings = timings;
+ this.children = children;
+ this.nodeTime = nodeTime;
+ }
+
+ public ProfileResult(StreamInput in) throws IOException{
+ this.queryType = in.readString();
+ this.luceneDescription = in.readString();
+ this.nodeTime = in.readLong();
+
+ int timingsSize = in.readVInt();
+ this.timings = new HashMap<>(timingsSize);
+ for (int i = 0; i < timingsSize; ++i) {
+ timings.put(in.readString(), in.readLong());
+ }
+
+ int size = in.readVInt();
+ this.children = new ArrayList<>(size);
+
+ for (int i = 0; i < size; i++) {
+ children.add(new ProfileResult(in));
+ }
+ }
+
+ /**
+ * Retrieve the lucene description of this query (e.g. the "explain" text)
+ */
+ public String getLuceneDescription() {
+ return luceneDescription;
+ }
+
+ /**
+ * Retrieve the name of the query (e.g. "TermQuery")
+ */
+ public String getQueryName() {
+ return queryType;
+ }
+
+ /**
+ * Returns the timing breakdown for this particular query node
+ */
+ public Map<String, Long> getTimeBreakdown() {
+ return Collections.unmodifiableMap(timings);
+ }
+
+ /**
+ * Returns the total time (inclusive of children) for this query node.
+ *
+ * @return elapsed time in nanoseconds
+ */
+ public long getTime() {
+ return nodeTime;
+ }
+
+ /**
+ * Returns a list of all profiled children queries
+ */
+ public List<ProfileResult> getProfiledChildren() {
+ return Collections.unmodifiableList(children);
+ }
+
+ @Override
+ public ProfileResult readFrom(StreamInput in) throws IOException {
+ return new ProfileResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(queryType);
+ out.writeString(luceneDescription);
+ out.writeLong(nodeTime); // not a VLong because it can be negative
+ out.writeVInt(timings.size());
+ for (Map.Entry<String, Long> entry : timings.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeLong(entry.getValue());
+ }
+ out.writeVInt(children.size());
+ for (ProfileResult child : children) {
+ child.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder = builder.startObject()
+ .field(QUERY_TYPE.getPreferredName(), queryType)
+ .field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription)
+ .field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", getTime() / 1000000.0))
+ .field(BREAKDOWN.getPreferredName(), timings);
+
+ if (!children.isEmpty()) {
+ builder = builder.startArray(CHILDREN.getPreferredName());
+ for (ProfileResult child : children) {
+ builder = child.toXContent(builder, params);
+ }
+ builder = builder.endArray();
+ }
+
+ builder = builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java
new file mode 100644
index 0000000000..b0dc6f2cd4
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * {@link Scorer} wrapper that will compute how much time is spent on moving
+ * the iterator, confirming matches and computing scores.
+ */
+final class ProfileScorer extends Scorer {
+
+ private final Scorer scorer;
+ private final ProfileWeight profileWeight;
+ private final ProfileBreakdown profile;
+
+ ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException {
+ super(w);
+ this.scorer = scorer;
+ this.profileWeight = w;
+ this.profile = profile;
+ }
+
+ @Override
+ public int docID() {
+ return scorer.docID();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
+ try {
+ return scorer.advance(target);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
+ try {
+ return scorer.nextDoc();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public float score() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.SCORE);
+ try {
+ return scorer.score();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return scorer.freq();
+ }
+
+ @Override
+ public long cost() {
+ return scorer.cost();
+ }
+
+ @Override
+ public Weight getWeight() {
+ return profileWeight;
+ }
+
+ @Override
+ public Collection<ChildScorer> getChildren() {
+ return scorer.getChildren();
+ }
+
+ @Override
+ public TwoPhaseIterator asTwoPhaseIterator() {
+ final TwoPhaseIterator in = scorer.asTwoPhaseIterator();
+ if (in == null) {
+ return null;
+ }
+ final DocIdSetIterator inApproximation = in.approximation();
+ final DocIdSetIterator approximation = new DocIdSetIterator() {
+
+ @Override
+ public int advance(int target) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
+ try {
+ return inApproximation.advance(target);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
+ try {
+ return inApproximation.nextDoc();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int docID() {
+ return inApproximation.docID();
+ }
+
+ @Override
+ public long cost() {
+ return inApproximation.cost();
+ }
+ };
+ return new TwoPhaseIterator(approximation) {
+ @Override
+ public boolean matches() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.MATCH);
+ try {
+ return in.matches();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public float matchCost() {
+ return in.matchCost();
+ }
+ };
+ }
+}
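For context, a consumer drives the wrapped two-phase iterator roughly as below (a hypothetical loop, not part of the patch). It shows why approximation moves land in NEXT_DOC/ADVANCE while the confirmation step is attributed separately to MATCH:

    TwoPhaseIterator twoPhase = profileScorer.asTwoPhaseIterator();
    DocIdSetIterator approximation = twoPhase.approximation();

    for (int doc = approximation.nextDoc();          // timed as NEXT_DOC
            doc != DocIdSetIterator.NO_MORE_DOCS;
            doc = approximation.nextDoc()) {         // timed as NEXT_DOC
        if (twoPhase.matches()) {                    // timed as MATCH
            float score = profileScorer.score();     // timed as SCORE
        }
    }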
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
new file mode 100644
index 0000000000..6e005babb3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A container class to hold the profile results for a single shard in the request.
+ * Contains a list of query profiles, a collector tree and a total rewrite tree.
+ */
+public final class ProfileShardResult implements Writeable<ProfileShardResult>, ToXContent {
+
+ private final List<ProfileResult> profileResults;
+
+ private final CollectorResult profileCollector;
+
+ private final long rewriteTime;
+
+ public ProfileShardResult(List<ProfileResult> profileResults, long rewriteTime,
+ CollectorResult profileCollector) {
+ assert profileCollector != null;
+ this.profileResults = profileResults;
+ this.profileCollector = profileCollector;
+ this.rewriteTime = rewriteTime;
+ }
+
+ public ProfileShardResult(StreamInput in) throws IOException {
+ int profileSize = in.readVInt();
+ profileResults = new ArrayList<>(profileSize);
+ for (int j = 0; j < profileSize; j++) {
+ profileResults.add(new ProfileResult(in));
+ }
+
+ profileCollector = new CollectorResult(in);
+ rewriteTime = in.readLong();
+ }
+
+ public List<ProfileResult> getQueryResults() {
+ return Collections.unmodifiableList(profileResults);
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+
+ public CollectorResult getCollectorResult() {
+ return profileCollector;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray("query");
+ for (ProfileResult p : profileResults) {
+ p.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.field("rewrite_time", rewriteTime);
+ builder.startArray("collector");
+ profileCollector.toXContent(builder, params);
+ builder.endArray();
+ return builder;
+ }
+
+ @Override
+ public ProfileShardResult readFrom(StreamInput in) throws IOException {
+ return new ProfileShardResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(profileResults.size());
+ for (ProfileResult p : profileResults) {
+ p.writeTo(out);
+ }
+ profileCollector.writeTo(out);
+ out.writeLong(rewriteTime);
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java
new file mode 100644
index 0000000000..1ce5cd721f
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Weight wrapper that will compute how much time it takes to build the
+ * {@link Scorer} and then return a {@link Scorer} that is wrapped in
+ * order to compute timings as well.
+ */
+public final class ProfileWeight extends Weight {
+
+ private final Weight subQueryWeight;
+ private final ProfileBreakdown profile;
+
+ public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException {
+ super(query);
+ this.subQueryWeight = subQueryWeight;
+ this.profile = profile;
+ }
+
+ @Override
+ public Scorer scorer(LeafReaderContext context) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER);
+ final Scorer subQueryScorer;
+ try {
+ subQueryScorer = subQueryWeight.scorer(context);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ if (subQueryScorer == null) {
+ return null;
+ }
+
+ return new ProfileScorer(this, subQueryScorer, profile);
+ }
+
+ @Override
+ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
+ // We use the default bulk scorer instead of the specialized one. The reason
+ // is that Lucene's BulkScorers do everything at once: finding matches,
+ // scoring them and calling the collector, so they make it impossible to
+ // see where time is spent, which is the purpose of query profiling.
+ // The default bulk scorer will pull a scorer and iterate over matches,
+ // this might be a significantly different execution path for some queries
+ // like disjunctions, but in general this is what is done anyway
+ return super.bulkScorer(context);
+ }
+
+ @Override
+ public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+ return subQueryWeight.explain(context, doc);
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ return subQueryWeight.getValueForNormalization();
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ subQueryWeight.normalize(norm, topLevelBoost);
+ }
+
+ @Override
+ public void extractTerms(Set<Term> set) {
+ subQueryWeight.extractTerms(set);
+ }
+
+}
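ProfileWeight is constructed wherever weights are built; per the Profiler Javadoc below, that wiring lives in ContextIndexSearcher (changed by this commit but not shown in this hunk). A rough sketch of the shape, assuming the profiler and needsScores plumbing:

    // Inside a createWeight(...) override of the searcher, roughly:
    ProfileBreakdown breakdown = profiler.getQueryBreakdown(query);
    breakdown.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT);
    Weight subQueryWeight;
    try {
        subQueryWeight = super.createWeight(query, needsScores);
    } finally {
        breakdown.stopAndRecordTime(); // child weights were profiled recursively above
        profiler.pollLastQuery();      // pop this query off the dependency stack
    }
    return new ProfileWeight(query, subQueryWeight, breakdown);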
diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profiler.java b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java
new file mode 100644
index 0000000000..bf0c9ec01b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.Query;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * This class acts as a thread-local storage for profiling a query. It also
+ * builds a representation of the query tree which is constructed "online" as
+ * the weights are wrapped by ContextIndexSearcher. This allows us to know the
+ * relationship between nodes in the tree without explicitly walking the tree
+ * or pre-wrapping everything
+ *
+ * A Profiler is associated with every Search, not with every Search-Request.
+ * E.g. a request may execute two searches (query + global agg); a Profiler
+ * just represents one of those
+ */
+public final class Profiler {
+
+ private final InternalProfileTree queryTree = new InternalProfileTree();
+
+ /**
+ * The root Collector used in the search
+ */
+ private InternalProfileCollector collector;
+
+ public Profiler() {}
+
+ /** Set the collector that is associated with this profiler. */
+ public void setCollector(InternalProfileCollector collector) {
+ if (this.collector != null) {
+ throw new IllegalStateException("The collector can only be set once.");
+ }
+ this.collector = Objects.requireNonNull(collector);
+ }
+
+ /**
+ * Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist.
+ * This should only be used for queries that will be undergoing scoring. Do not use it to profile the
+ * rewriting phase
+ */
+ public ProfileBreakdown getQueryBreakdown(Query query) {
+ return queryTree.getQueryBreakdown(query);
+ }
+
+ /**
+ * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a
+ * single metric
+ */
+ public void startRewriteTime() {
+ queryTree.startRewriteTime();
+ }
+
+ /**
+ * Stop recording the current rewrite and add its time to the total tally, returning the
+ * cumulative time so far.
+ *
+ * @return cumulative rewrite time
+ */
+ public long stopAndAddRewriteTime() {
+ return queryTree.stopAndAddRewriteTime();
+ }
+
+ /**
+ * Removes the last (i.e. most recent) query on the stack. This should only be called for scoring
+ * queries, not rewritten queries
+ */
+ public void pollLastQuery() {
+ queryTree.pollLast();
+ }
+
+ /**
+ * @return a hierarchical representation of the profiled query tree
+ */
+ public List<ProfileResult> getQueryTree() {
+ return queryTree.getQueryTree();
+ }
+
+ /**
+ * @return total time taken to rewrite all queries in this profile
+ */
+ public long getRewriteTime() {
+ return queryTree.getRewriteTime();
+ }
+
+ /**
+ * Return the current root Collector for this search
+ */
+ public CollectorResult getCollector() {
+ return collector.getCollectorTree();
+ }
+
+ /**
+ * Helper method to convert a list of Profilers into a list of ProfileShardResults, which
+ * can be serialized to other nodes, emitted as JSON, etc.
+ *
+ * @param profilers A list of Profilers to convert into ProfileShardResults
+ * @return A list of corresponding ProfileShardResults
+ */
+ public static List<ProfileShardResult> buildShardResults(List<Profiler> profilers) {
+ List<ProfileShardResult> results = new ArrayList<>(profilers.size());
+ for (Profiler profiler : profilers) {
+ ProfileShardResult result = new ProfileShardResult(
+ profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector());
+ results.add(result);
+ }
+ return results;
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
new file mode 100644
index 0000000000..0fb7d9ac1c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** Wrapper around several {@link Profiler}s that makes management easier. */
+public final class Profilers {
+
+ private final ContextIndexSearcher searcher;
+ private final List<Profiler> profilers;
+
+ /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */
+ public Profilers(ContextIndexSearcher searcher) {
+ this.searcher = searcher;
+ this.profilers = new ArrayList<>();
+ addProfiler();
+ }
+
+ /** Switch to a new profiler. */
+ public Profiler addProfiler() {
+ Profiler profiler = new Profiler();
+ searcher.setProfiler(profiler);
+ profilers.add(profiler);
+ return profiler;
+ }
+
+ /** Get the current profiler. */
+ public Profiler getCurrent() {
+ return profilers.get(profilers.size() - 1);
+ }
+
+ /** Return the list of all created {@link Profiler}s so far. */
+ public List<Profiler> getProfilers() {
+ return Collections.unmodifiableList(profilers);
+ }
+
+}
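Profilers manages a list rather than a single Profiler because of the query-plus-global-aggregations case called out in the Profiler Javadoc: each search a request executes gets its own profiler. A hypothetical flow:

    Profilers profilers = new Profilers(contextIndexSearcher); // starts with one Profiler
    Profiler mainSearch = profilers.getCurrent();   // profiles the main query phase
    // ... execute the main search ...

    Profiler globalAggs = profilers.addProfiler();  // the searcher now reports to this one
    // ... execute the global-aggregation search ...

    List<ProfileShardResult> results = Profiler.buildShardResults(profilers.getProfilers());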
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index ce8836cd33..08ff849871 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.CollectorResult;
+import org.elasticsearch.search.profile.InternalProfileCollector;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.Profiler;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.sort.SortParseElement;
import org.elasticsearch.search.sort.TrackScoresParseElement;
import org.elasticsearch.search.suggest.SuggestPhase;
+import java.util.AbstractList;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase {
}
suggestPhase.execute(searchContext);
aggregationPhase.execute(searchContext);
+
+ if (searchContext.getProfilers() != null) {
+ List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+ searchContext.queryResult().profileResults(shardResults);
+ }
}
private static boolean returnsDocsInOrder(Query query, Sort sort) {
@@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase {
QuerySearchResult queryResult = searchContext.queryResult();
queryResult.searchTimedOut(false);
+ final boolean doProfile = searchContext.getProfilers() != null;
final SearchType searchType = searchContext.searchType();
boolean rescore = false;
try {
@@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase {
Callable<TopDocs> topDocsCallable;
assert query == searcher.rewrite(query); // already rewritten
+
if (searchContext.size() == 0) { // no matter what the value of from is
final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
collector = totalHitCountCollector;
+ if (searchContext.getProfilers() != null) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList());
+ }
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
@@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase {
topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc);
}
collector = topDocsCollector;
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList());
+ }
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
@@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase {
final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
if (terminateAfterSet) {
+ final Collector child = collector;
// throws Lucene.EarlyTerminationException when given count is reached
collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
if (searchContext.parsedPostFilter() != null) {
+ final Collector child = collector;
// this will only get applied to the actual search collector and not
// to any scoped collectors, also, it will only be applied to the main collector
// since that is where the filter should only work
final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false);
collector = new FilteredCollector(collector, filterWeight);
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
// plug in additional collectors, like aggregations
- List<Collector> allCollectors = new ArrayList<>();
- allCollectors.add(collector);
- allCollectors.addAll(searchContext.queryCollectors().values());
- collector = MultiCollector.wrap(allCollectors);
+ final List<Collector> subCollectors = new ArrayList<>();
+ subCollectors.add(collector);
+ subCollectors.addAll(searchContext.queryCollectors().values());
+ collector = MultiCollector.wrap(subCollectors);
+ if (doProfile && collector instanceof InternalProfileCollector == false) {
+ // When there is a single collector to wrap, MultiCollector returns it
+ // directly, so only wrap in the case that there are several sub collectors
+ final List<InternalProfileCollector> children = new AbstractList<InternalProfileCollector>() {
+ @Override
+ public InternalProfileCollector get(int index) {
+ return (InternalProfileCollector) subCollectors.get(index);
+ }
+ @Override
+ public int size() {
+ return subCollectors.size();
+ }
+ };
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children);
+ }
// apply the minimum score after multi collector so we filter aggs as well
if (searchContext.minimumScore() != null) {
+ final Collector child = collector;
collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
if (collector.getClass() == TotalHitCountCollector.class) {
@@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase {
final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis();
if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed
+ final Collector child = collector;
// TODO: change to use our own counter that uses the scheduler in ThreadPool
// throws TimeLimitingCollector.TimeExceededException when timeout has reached
collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
try {
if (collector != null) {
+ if (doProfile) {
+ searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector);
+ }
searcher.search(query, collector);
}
} catch (TimeLimitingCollector.TimeExceededException e) {
@@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase {
queryResult.topDocs(topDocsCallable.call());
+ if (searchContext.getProfilers() != null) {
+ List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+ searchContext.queryResult().profileResults(shardResults);
+ }
+
return rescore;
} catch (Throwable e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
}
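Putting the QueryPhase changes together: each optional wrapper records the collector it wraps as its only profiled child, so with every feature active the collector tree reported back looks like the sketch below (constant names from CollectorResult; only the wrappers a request actually uses appear, and the aggregation children assume AggregationPhase wraps its collectors with REASON_AGGREGATION, as elsewhere in this commit):

    REASON_SEARCH_TIMEOUT
      REASON_SEARCH_MIN_SCORE
        REASON_SEARCH_MULTI
          REASON_SEARCH_POST_FILTER
            REASON_SEARCH_TERMINATE_AFTER_COUNT
              REASON_SEARCH_TOP_HITS
          REASON_AGGREGATION   (one per collector in queryCollectors())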
diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 7f8d12a9c9..9223eb5a82 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -20,6 +20,8 @@
package org.elasticsearch.search.query;
import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
+import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
@@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
private Suggest suggest;
private boolean searchTimedOut;
private Boolean terminatedEarly = null;
+ private List<ProfileShardResult> profileShardResults;
public QuerySearchResult() {
@@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider {
this.aggregations = aggregations;
}
+ /**
+ * Returns the profiled results for this search, or null if profiling was not enabled
+ * @return The profiled results, or null
+ */
+ public @Nullable List<ProfileShardResult> profileResults() {
+ return profileShardResults;
+ }
+
+ /**
+ * Sets the finalized profiling results for this query
+ * @param shardResults The finalized profile
+ */
+ public void profileResults(List<ProfileShardResult> shardResults) {
+ this.profileShardResults = shardResults;
+ }
+
public List<SiblingPipelineAggregator> pipelineAggregators() {
return pipelineAggregators;
}
@@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
searchTimedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
+
+ if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+ int profileSize = in.readVInt();
+ profileShardResults = new ArrayList<>(profileSize);
+ for (int i = 0; i < profileSize; i++) {
+ ProfileShardResult result = new ProfileShardResult(in);
+ profileShardResults.add(result);
+ }
+ }
}
@Override
@@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
out.writeBoolean(searchTimedOut);
out.writeOptionalBoolean(terminatedEarly);
+
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ if (profileShardResults == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(profileShardResults.size());
+ for (ProfileShardResult shardResult : profileShardResults) {
+ shardResult.writeTo(out);
+ }
+ }
+ }
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
index c465eaf6ef..e4fe2c08f7 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
@@ -50,6 +50,7 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -130,7 +131,7 @@ public class ScriptSortParser implements SortParser {
if (type == null) {
throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation());
}
- final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH);
+ final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) {
throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation());
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
index 106672ae7a..6a0155ffb7 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
@@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs;
import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector;
import org.apache.lucene.util.*;
import org.apache.lucene.util.PriorityQueue;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
}
CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize());
spare.copyUTF8Bytes(suggestionContext.getText());
- CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length());
+ CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length());
completionSuggestion.addTerm(completionSuggestEntry);
TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize());
suggest(searcher, suggestionContext.toQuery(), collector);
@@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
}
if (numResult++ < suggestionContext.getSize()) {
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(
- new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
+ new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
completionSuggestEntry.addOption(option);
} else {
break;
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
index 4bbdaf9c49..9b083a9178 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
@@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator;
import java.io.IOException;
+import java.util.Collections;
public final class PhraseSuggestParser implements SuggestContextParser {
@@ -143,7 +144,7 @@ public final class PhraseSuggestParser implements SuggestContextParser {
}
Template template = Template.parse(parser, parseFieldMatcher);
CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH,
- headersContext);
+ headersContext, Collections.emptyMap());
suggestion.setCollateQueryScript(compiledScript);
} else if ("params".equals(fieldName)) {
suggestion.setCollateScriptParams(parser.map());
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
index fccf9ebc30..c7fa6fae30 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
@@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.query.ParsedQuery;
@@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
if (!collateMatch && !collatePrune) {
continue;
}
- Text phrase = new StringText(spare.toString());
+ Text phrase = new Text(spare.toString());
Text highlighted = null;
if (suggestion.getPreTag() != null) {
spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
- highlighted = new StringText(spare.toString());
+ highlighted = new Text(spare.toString());
}
if (collatePrune) {
resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
@@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) {
spare.copyUTF8Bytes(suggestion.getText());
- return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore);
+ return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore);
}
ScriptService scriptService() {
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
index 4c1b176c99..34cd3ad4d5 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
@@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.text.BytesText;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.search.suggest.SuggestContextParser;
import org.elasticsearch.search.suggest.SuggestUtils;
@@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester<TermSuggestionContext> {
SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
);
- Text key = new BytesText(new BytesArray(token.term.bytes()));
+ Text key = new Text(new BytesArray(token.term.bytes()));
TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
for (SuggestWord suggestWord : suggestedWords) {
- Text word = new StringText(suggestWord.string);
+ Text word = new Text(suggestWord.string);
resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
}
response.addTerm(resultEntry);