summaryrefslogtreecommitdiff
path: root/core/src
diff options
context:
space:
mode:
authorjavanna <cavannaluca@gmail.com>2017-05-17 17:59:00 +0200
committerLuca Cavanna <luca@elastic.co>2017-05-17 17:59:00 +0200
commitce7326eb88b95c7d9b893fde2e71cc90b09d4fb3 (patch)
tree6b92539451ef529c7b8b2c5b06a417144570fcd2 /core/src
parentd5fc520741943c83cfac58b4f3289f65af233c6d (diff)
parent2ccc223ff761043807683f34b29c693af6c94d95 (diff)
Merge branch 'master' into feature/client_aggs_parsing
Diffstat (limited to 'core/src')
-rw-r--r--core/src/main/java/org/elasticsearch/ElasticsearchException.java8
-rw-r--r--core/src/main/java/org/elasticsearch/Version.java93
-rw-r--r--core/src/main/java/org/elasticsearch/action/OriginalIndices.java9
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java10
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java15
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java2
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java5
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java5
-rw-r--r--core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java9
-rw-r--r--core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java10
-rw-r--r--core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java6
-rw-r--r--core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java2
-rw-r--r--core/src/main/java/org/elasticsearch/action/index/IndexRequest.java6
-rw-r--r--core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java2
-rw-r--r--core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java2
-rw-r--r--core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/NamedDiff.java2
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java2
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java2
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java2
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java23
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java33
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java49
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java13
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java10
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java2
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java1
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java3
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java3
-rw-r--r--core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java4
-rw-r--r--core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java1
-rw-r--r--core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java1
-rw-r--r--core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java8
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java28
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java2
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java234
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueue.java80
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java4
-rw-r--r--core/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java56
-rw-r--r--core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java4
-rw-r--r--core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java4
-rw-r--r--core/src/main/java/org/elasticsearch/gateway/GatewayService.java9
-rw-r--r--core/src/main/java/org/elasticsearch/index/IndexService.java4
-rw-r--r--core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java12
-rw-r--r--core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java16
-rw-r--r--core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java11
-rw-r--r--core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java6
-rw-r--r--core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java8
-rw-r--r--core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java4
-rw-r--r--core/src/main/java/org/elasticsearch/index/shard/IndexShard.java32
-rw-r--r--core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java16
-rw-r--r--core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java252
-rw-r--r--core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java4
-rw-r--r--core/src/main/java/org/elasticsearch/monitor/os/OsStats.java4
-rw-r--r--core/src/main/java/org/elasticsearch/plugins/PluginInfo.java4
-rw-r--r--core/src/main/java/org/elasticsearch/rest/RestStatus.java2
-rw-r--r--core/src/main/java/org/elasticsearch/script/AbstractSearchScript.java8
-rw-r--r--core/src/main/java/org/elasticsearch/script/NativeScriptEngine.java8
-rw-r--r--core/src/main/java/org/elasticsearch/script/NativeScriptFactory.java2
-rw-r--r--core/src/main/java/org/elasticsearch/script/Script.java8
-rw-r--r--core/src/main/java/org/elasticsearch/script/ScriptMetaData.java4
-rw-r--r--core/src/main/java/org/elasticsearch/script/StoredScriptSource.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java5
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java3
-rw-r--r--core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java8
-rw-r--r--core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java8
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java132
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/IndexField.java128
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java298
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java74
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java199
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/LeafSearchLookup.java13
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java87
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java4
-rw-r--r--core/src/main/java/org/elasticsearch/search/lookup/TermPosition.java58
-rw-r--r--core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java2
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilder.java166
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java9
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java8
-rw-r--r--core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java15
-rw-r--r--core/src/main/java/org/elasticsearch/transport/TcpTransport.java10
-rw-r--r--core/src/main/java/org/elasticsearch/tribe/TribeService.java6
-rw-r--r--core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/VersionTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java54
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java3
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java5
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/main/MainActionTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java31
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java10
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java9
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java259
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java50
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java6
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java8
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java25
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java24
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java226
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueueTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java45
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java9
-rw-r--r--core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java4
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java3
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java5
-rw-r--r--core/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java4
-rw-r--r--core/src/test/java/org/elasticsearch/script/IndexLookupIT.java1029
-rw-r--r--core/src/test/java/org/elasticsearch/script/NativeScriptTests.java2
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java15
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java15
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java19
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java38
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java16
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java14
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java13
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorTests.java60
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java12
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorTests.java11
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java12
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java24
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java101
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilderTests.java41
-rw-r--r--core/src/test/resources/indices/bwc/index-5.2.2.zipbin0 -> 503810 bytes
-rw-r--r--core/src/test/resources/indices/bwc/index-5.3.2.zipbin0 -> 399870 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-5.2.2.zipbin0 -> 246750 bytes
-rw-r--r--core/src/test/resources/indices/bwc/repo-5.3.2.zipbin0 -> 190884 bytes
183 files changed, 1781 insertions, 3236 deletions
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index b6ed2cb7a2..ae575af61c 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -133,7 +133,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
super(in.readOptionalString(), in.readException());
readStackTrace(this, in);
headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
} else {
for (Iterator<Map.Entry<String, List<String>>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) {
@@ -284,7 +284,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
out.writeOptionalString(this.getMessage());
out.writeException(this.getCause());
writeStackTraces(this, out);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);
} else {
@@ -985,11 +985,11 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,
UNKNOWN_VERSION_ADDED),
TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
- org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1_UNRELEASED),
+ org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1),
SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
- org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0_UNRELEASED);
+ org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0);
final Class<? extends ElasticsearchException> exceptionClass;
final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index d23417df07..44b989a378 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -55,33 +55,25 @@ public class Version implements Comparable<Version> {
public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
public static final int V_5_0_2_ID = 5000299;
public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
- public static final int V_5_0_3_ID_UNRELEASED = 5000399;
- public static final Version V_5_0_3_UNRELEASED = new Version(V_5_0_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
// no version constant for 5.1.0 due to inadvertent release
- public static final int V_5_1_1_ID_UNRELEASED = 5010199;
- public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
- public static final int V_5_1_2_ID_UNRELEASED = 5010299;
- public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
- public static final int V_5_1_3_ID_UNRELEASED = 5010399;
- public static final Version V_5_1_3_UNRELEASED = new Version(V_5_1_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
- public static final int V_5_2_0_ID_UNRELEASED = 5020099;
- public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
- public static final int V_5_2_1_ID_UNRELEASED = 5020199;
- public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
- public static final int V_5_2_2_ID_UNRELEASED = 5020299;
- public static final Version V_5_2_2_UNRELEASED = new Version(V_5_2_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
- public static final int V_5_2_3_ID_UNRELEASED = 5020399;
- public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
- public static final int V_5_3_0_ID_UNRELEASED = 5030099;
- public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
- public static final int V_5_3_1_ID_UNRELEASED = 5030199;
- public static final Version V_5_3_1_UNRELEASED = new Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
- public static final int V_5_3_2_ID_UNRELEASED = 5030299;
- public static final Version V_5_3_2_UNRELEASED = new Version(V_5_3_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
- public static final int V_5_4_0_ID_UNRELEASED = 5040099;
- public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
- public static final int V_5_4_1_ID_UNRELEASED = 5040199;
- public static final Version V_5_4_1_UNRELEASED = new Version(V_5_4_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_1);
+ public static final int V_5_1_1_ID = 5010199;
+ public static final Version V_5_1_1 = new Version(V_5_1_1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
+ public static final int V_5_1_2_ID = 5010299;
+ public static final Version V_5_1_2 = new Version(V_5_1_2_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
+ public static final int V_5_2_0_ID = 5020099;
+ public static final Version V_5_2_0 = new Version(V_5_2_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_0);
+ public static final int V_5_2_1_ID = 5020199;
+ public static final Version V_5_2_1 = new Version(V_5_2_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+ public static final int V_5_2_2_ID = 5020299;
+ public static final Version V_5_2_2 = new Version(V_5_2_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+ public static final int V_5_3_0_ID = 5030099;
+ public static final Version V_5_3_0 = new Version(V_5_3_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+ public static final int V_5_3_1_ID = 5030199;
+ public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
+ public static final int V_5_3_2_ID = 5030299;
+ public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
+ public static final int V_5_4_0_ID = 5040099;
+ public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
public static final int V_5_5_0_ID_UNRELEASED = 5050099;
public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
@@ -111,32 +103,24 @@ public class Version implements Comparable<Version> {
return V_6_0_0_alpha1_UNRELEASED;
case V_5_5_0_ID_UNRELEASED:
return V_5_5_0_UNRELEASED;
- case V_5_4_1_ID_UNRELEASED:
- return V_5_4_1_UNRELEASED;
- case V_5_4_0_ID_UNRELEASED:
- return V_5_4_0_UNRELEASED;
- case V_5_3_2_ID_UNRELEASED:
- return V_5_3_2_UNRELEASED;
- case V_5_3_1_ID_UNRELEASED:
- return V_5_3_1_UNRELEASED;
- case V_5_3_0_ID_UNRELEASED:
- return V_5_3_0_UNRELEASED;
- case V_5_2_3_ID_UNRELEASED:
- return V_5_2_3_UNRELEASED;
- case V_5_2_2_ID_UNRELEASED:
- return V_5_2_2_UNRELEASED;
- case V_5_2_1_ID_UNRELEASED:
- return V_5_2_1_UNRELEASED;
- case V_5_2_0_ID_UNRELEASED:
- return V_5_2_0_UNRELEASED;
- case V_5_1_3_ID_UNRELEASED:
- return V_5_1_3_UNRELEASED;
- case V_5_1_2_ID_UNRELEASED:
- return V_5_1_2_UNRELEASED;
- case V_5_1_1_ID_UNRELEASED:
- return V_5_1_1_UNRELEASED;
- case V_5_0_3_ID_UNRELEASED:
- return V_5_0_3_UNRELEASED;
+ case V_5_4_0_ID:
+ return V_5_4_0;
+ case V_5_3_2_ID:
+ return V_5_3_2;
+ case V_5_3_1_ID:
+ return V_5_3_1;
+ case V_5_3_0_ID:
+ return V_5_3_0;
+ case V_5_2_2_ID:
+ return V_5_2_2;
+ case V_5_2_1_ID:
+ return V_5_2_1;
+ case V_5_2_0_ID:
+ return V_5_2_0;
+ case V_5_1_2_ID:
+ return V_5_1_2;
+ case V_5_1_1_ID:
+ return V_5_1_1;
case V_5_0_2_ID:
return V_5_0_2;
case V_5_0_1_ID:
@@ -296,7 +280,7 @@ public class Version implements Comparable<Version> {
final int bwcMinor;
if (this.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
bwcMajor = major - 1;
- bwcMinor = 0; // TODO we have to move this to the latest released minor of the last major but for now we just keep
+ bwcMinor = 4;
} else {
bwcMajor = major;
bwcMinor = 0;
@@ -306,7 +290,8 @@ public class Version implements Comparable<Version> {
/**
* Returns the minimum created index version that this version supports. Indices created with lower versions
- * can't be used with this version.
+ * can't be used with this version. This should also be used for file based serialization backwards compatibility ie. on serialization
+ * code that is used to read / write file formats like transaction logs, cluster state, and index metadata.
*/
public Version minimumIndexCompatibilityVersion() {
final int bwcMajor;
diff --git a/core/src/main/java/org/elasticsearch/action/OriginalIndices.java b/core/src/main/java/org/elasticsearch/action/OriginalIndices.java
index 39cf5c6324..0642326d2b 100644
--- a/core/src/main/java/org/elasticsearch/action/OriginalIndices.java
+++ b/core/src/main/java/org/elasticsearch/action/OriginalIndices.java
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
+import java.util.Arrays;
/**
* Used to keep track of original indices within internal (e.g. shard level) requests
@@ -64,4 +65,12 @@ public final class OriginalIndices implements IndicesRequest {
out.writeStringArrayNullable(originalIndices.indices);
originalIndices.indicesOptions.writeIndicesOptions(out);
}
+
+ @Override
+ public String toString() {
+ return "OriginalIndices{" +
+ "indices=" + Arrays.toString(indices) +
+ ", indicesOptions=" + indicesOptions +
+ '}';
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
index d80e58232a..aea1ee57dc 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
@@ -249,8 +249,8 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
}
private void checkVersion(Version version) {
- if (version.before(Version.V_5_2_0_UNRELEASED)) {
- throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0_UNRELEASED +
+ if (version.before(Version.V_5_2_0)) {
+ throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0 +
" nodes, node version [" + version + "]");
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java
index 575fbcd3b9..e9fec716a9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java
@@ -67,12 +67,20 @@ final class SettingsUpdater {
.transientSettings(transientSettings.build());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
- boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
+ boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings())
+ || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
if (updatedReadOnly) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
} else {
blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
+ boolean updatedReadOnlyAllowDelete = MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.persistentSettings())
+ || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.transientSettings());
+ if (updatedReadOnlyAllowDelete) {
+ blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ } else {
+ blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ }
ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
Settings settings = build.metaData().settings();
// now we try to apply things and if they are invalid we fail
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index 913dbfff20..dae55b2fc0 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -67,12 +67,15 @@ public class TransportClusterUpdateSettingsAction extends
@Override
protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) {
// allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it
- if ((request.transientSettings().isEmpty() &&
- request.persistentSettings().size() == 1 &&
- MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) ||
- (request.persistentSettings().isEmpty() && request.transientSettings().size() == 1 &&
- MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings()))) {
- return null;
+ if (request.transientSettings().size() + request.persistentSettings().size() == 1) {
+ // only one setting
+ if (MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())
+ || MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())
+ || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.transientSettings())
+ || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.persistentSettings())) {
+ // if one of the settings above is the only setting in the request, it means the block is being reset
+ return null;
+ }
}
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
index 36d63bbceb..df38690b79 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java
@@ -134,7 +134,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
routing = in.readOptionalString();
preference = in.readOptionalString();
- if (in.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrBefore(Version.V_5_1_1)) {
//types
in.readStringArray();
}
@@ -153,7 +153,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
out.writeOptionalString(routing);
out.writeOptionalString(preference);
- if (out.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrBefore(Version.V_5_1_1)) {
//types
out.writeStringArray(Strings.EMPTY_ARRAY);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
index c2fb90434e..b5b28e2b8f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
@@ -73,7 +73,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new DiscoveryNode(in);
}
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
int size = in.readVInt();
indicesAndFilters = new HashMap<>();
for (int i = 0; i < size; i++) {
@@ -95,7 +95,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
for (DiscoveryNode node : nodes) {
node.writeTo(out);
}
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeVInt(indicesAndFilters.size());
for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
out.writeString(entry.getKey());
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
index b8302a03c2..2dc0ed870c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java
@@ -59,7 +59,7 @@ public class GetStoredScriptResponse extends ActionResponse implements ToXConten
super.readFrom(in);
if (in.readBoolean()) {
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
source = new StoredScriptSource(in);
} else {
source = new StoredScriptSource(in.readString());
@@ -78,7 +78,7 @@ public class GetStoredScriptResponse extends ActionResponse implements ToXConten
} else {
out.writeBoolean(true);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
source.writeTo(out);
} else {
out.writeString(source.getCode());
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
index f6a9e05539..ca26df75de 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java
@@ -123,7 +123,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
id = in.readOptionalString();
content = in.readBytesReference();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(content);
@@ -137,7 +137,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
out.writeString(lang == null ? "" : lang);
out.writeOptionalString(id);
out.writeBytesReference(content);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
index b92839638d..1e54def238 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java
@@ -117,7 +117,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
startOffset = in.readInt();
endOffset = in.readInt();
position = in.readVInt();
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
Integer len = in.readOptionalVInt();
if (len != null) {
positionLength = len;
@@ -135,7 +135,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
out.writeInt(startOffset);
out.writeInt(endOffset);
out.writeVInt(position);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeOptionalVInt(positionLength > 1 ? positionLength : null);
}
out.writeOptionalString(type);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
index 251eed8bdb..f5c63bd470 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
@@ -78,7 +78,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
- return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
+ return state.blocks().indicesAllowReleaseResources(indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
index 8a8d1063f3..d20957c4bd 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -62,7 +62,10 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
if (globalBlock != null) {
return globalBlock;
}
- if (request.settings().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
+ if (request.settings().size() == 1 && // we have to allow resetting these settings otherwise users can't unblock an index
+ IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings())
+ || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())
+ || IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.settings())) {
return null;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
index f03bb49fda..df9c12c95f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java
@@ -76,7 +76,7 @@ public class QueryExplanation implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
index = in.readString();
- if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
shard = in.readInt();
} else {
shard = RANDOM_SHARD;
@@ -89,7 +89,7 @@ public class QueryExplanation implements Streamable {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(index);
- if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
out.writeInt(shard);
}
out.writeBoolean(valid);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
index 18ccf1ede7..5953a5548c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java
@@ -154,7 +154,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
}
explain = in.readBoolean();
rewrite = in.readBoolean();
- if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
allShards = in.readBoolean();
}
}
@@ -169,7 +169,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
}
out.writeBoolean(explain);
out.writeBoolean(rewrite);
- if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
out.writeBoolean(allShards);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
index 68cede5d25..0ddd01187a 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
@@ -37,7 +37,6 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.rest.RestStatus;
@@ -421,7 +420,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readVInt();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
opType = OpType.fromId(in.readByte());
} else {
opType = OpType.fromString(in.readString());
@@ -448,7 +447,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeByte(opType.getId());
} else {
out.writeString(opType.getLowercase());
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
index 4e1419099b..5b99ed02cf 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
@@ -415,10 +415,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
}
static {
- assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false:
+ assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_6_0_0_alpha1_UNRELEASED) == false:
"Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" +
" as the current minimum compatible version [" +
- Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0";
+ Version.CURRENT.minimumCompatibilityVersion() + "] is after 6.0";
}
/**
diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java
index ce1ba28289..7a47405d92 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java
@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
+import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
@@ -78,6 +79,8 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
super.readFrom(in);
fields = in.readStringArray();
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+ indices = in.readStringArray();
+ indicesOptions = IndicesOptions.readIndicesOptions(in);
mergeResults = in.readBoolean();
} else {
mergeResults = true;
@@ -89,6 +92,8 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
super.writeTo(out);
out.writeStringArray(fields);
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+ out.writeStringArray(indices);
+ indicesOptions.writeIndicesOptions(out);
out.writeBoolean(mergeResults);
}
}
@@ -118,12 +123,12 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
* The list of indices to lookup
*/
public FieldCapabilitiesRequest indices(String... indices) {
- this.indices = indices;
+ this.indices = Objects.requireNonNull(indices, "indices must not be null");
return this;
}
public FieldCapabilitiesRequest indicesOptions(IndicesOptions indicesOptions) {
- this.indicesOptions = indicesOptions;
+ this.indicesOptions = Objects.requireNonNull(indicesOptions, "indices options must not be null");
return this;
}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
index 6491b8ce4c..8fad95257a 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java
@@ -26,6 +26,7 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
@@ -72,7 +73,14 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(),
request.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
- final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
+ final String[] concreteIndices;
+ if (remoteClusterIndices.isEmpty() == false && localIndices.indices().length == 0) {
+ // in the case we have one or more remote indices but no local we don't expand to all local indices and just do remote
+ // indices
+ concreteIndices = Strings.EMPTY_ARRAY;
+ } else {
+ concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
+ }
final int totalNumRequest = concreteIndices.length + remoteClusterIndices.size();
final CountDown completionCounter = new CountDown(totalNumRequest);
final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>());
diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
index 5f85b95c7f..44b330dc37 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
@@ -323,14 +323,14 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
out.writeLong(sumTotalTermFreq);
out.writeBoolean(isSearchable);
out.writeBoolean(isAggregatable);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeBoolean(hasMinMax);
if (hasMinMax) {
writeMinMax(out);
}
} else {
assert hasMinMax : "cannot serialize null min/max fieldstats in a mixed-cluster " +
- "with pre-" + Version.V_5_2_0_UNRELEASED + " nodes, remote version [" + out.getVersion() + "]";
+ "with pre-" + Version.V_5_2_0 + " nodes, remote version [" + out.getVersion() + "]";
writeMinMax(out);
}
}
@@ -705,7 +705,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
boolean isSearchable = in.readBoolean();
boolean isAggregatable = in.readBoolean();
boolean hasMinMax = true;
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
hasMinMax = in.readBoolean();
}
switch (type) {
diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java
index f126c73d04..2046aeddc1 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java
@@ -93,7 +93,7 @@ public class FieldStatsResponse extends BroadcastResponse {
for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
out.writeString(entry1.getKey());
int size = entry1.getValue().size();
- if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_2_0)) {
// filter fieldstats without min/max information
for (FieldStats stats : entry1.getValue().values()) {
if (stats.hasMinMax() == false) {
@@ -103,7 +103,7 @@ public class FieldStatsResponse extends BroadcastResponse {
}
out.writeVInt(size);
for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
- if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java
index 133a94e69a..d2f3a7d5e4 100644
--- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java
@@ -68,7 +68,7 @@ public class FieldStatsShardResponse extends BroadcastShardResponse {
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
final Map<String, FieldStats<?> > stats;
- if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_2_0)) {
/**
* FieldStats with null min/max are not (de)serializable in versions prior to {@link Version.V_5_2_0_UNRELEASED}
*/
diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 41780ca0c7..ee9a5b2dfe 100644
--- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -534,7 +534,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
pipeline = in.readOptionalString();
isRetry = in.readBoolean();
autoGeneratedTimestamp = in.readLong();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
contentType = in.readOptionalWriteable(XContentType::readFrom);
} else {
contentType = XContentFactory.xContentType(source);
@@ -558,7 +558,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeBytesReference(source);
out.writeByte(opType.getId());
// ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
- if (out.getVersion().before(Version.V_5_1_2_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_1_2)) {
out.writeLong(resolveVersionDefaults());
} else {
out.writeLong(version);
@@ -567,7 +567,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
out.writeOptionalString(pipeline);
out.writeBoolean(isRetry);
out.writeLong(autoGeneratedTimestamp);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeOptionalWriteable(contentType);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
index f5d5ab2d34..394349ca01 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java
@@ -80,7 +80,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
super.readFrom(in);
id = in.readString();
source = in.readBytesReference();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(source);
@@ -92,7 +92,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
super.writeTo(out);
out.writeString(id);
out.writeBytesReference(source);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
index 170af18968..30beb32681 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java
@@ -103,7 +103,7 @@ public class SimulatePipelineRequest extends ActionRequest {
id = in.readOptionalString();
verbose = in.readBoolean();
source = in.readBytesReference();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(source);
@@ -116,7 +116,7 @@ public class SimulatePipelineRequest extends ActionRequest {
out.writeOptionalString(id);
out.writeBoolean(verbose);
out.writeBytesReference(source);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
index 1ec21ab424..a8c5bdeacf 100644
--- a/core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
+++ b/core/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
@@ -64,7 +64,7 @@ final class ExpandSearchPhase extends SearchPhase {
@Override
public void run() throws IOException {
- if (isCollapseRequest()) {
+ if (isCollapseRequest() && searchResponse.getHits().getHits().length > 0) {
SearchRequest searchRequest = context.getRequest();
CollapseBuilder collapseBuilder = searchRequest.source().collapse();
MultiSearchRequest multiRequest = new MultiSearchRequest();
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
index 2ecfc213f9..9dd2125d5e 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
@@ -120,7 +120,7 @@ public class SearchTransportService extends AbstractComponent {
// this used to be the QUERY_AND_FETCH which doesn't exists anymore.
final boolean fetchDocuments = request.numberOfShards() == 1;
Supplier<SearchPhaseResult> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
- if (connection.getVersion().before(Version.V_5_3_0_UNRELEASED) && fetchDocuments) {
+ if (connection.getVersion().before(Version.V_5_3_0) && fetchDocuments) {
// this is a BWC layer for pre 5.3 indices
if (request.scroll() != null) {
/**
diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index de2577f573..0fe83e2144 100644
--- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -499,7 +499,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
if (in.readBoolean()) {
doc = in.readBytesReference();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(doc);
@@ -544,7 +544,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
out.writeBoolean(doc != null);
if (doc != null) {
out.writeBytesReference(doc);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
index 9da3167ae8..b7f2a96688 100644
--- a/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
+++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiff.java
@@ -30,7 +30,7 @@ public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteabl
* The minimal version of the recipient this custom object can be sent to
*/
default Version getMinimalSupportedVersion() {
- return Version.CURRENT.minimumCompatibilityVersion();
+ return Version.CURRENT.minimumIndexCompatibilityVersion();
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
index 0797442209..b548b49fe1 100644
--- a/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/NamedDiffable.java
@@ -30,6 +30,6 @@ public interface NamedDiffable<T> extends Diffable<T>, NamedWriteable {
* The minimal version of the recipient this custom object can be sent to
*/
default Version getMinimalSupportedVersion() {
- return Version.CURRENT.minimumCompatibilityVersion();
+ return Version.CURRENT.minimumIndexCompatibilityVersion();
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
index 446f4ae074..e0336f61e3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
+++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
@@ -40,7 +40,7 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable<Custom> i
public static final String TYPE = "snapshot_deletions";
// the version where SnapshotDeletionsInProgress was introduced
- public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED;
+ public static final Version VERSION_INTRODUCED = Version.V_5_2_0;
// the list of snapshot deletion request entries
private final List<Entry> entries;
diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
index 5b8d391889..53ac626820 100644
--- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
+++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
@@ -51,7 +51,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
// a snapshot in progress from a pre 5.2.x node
public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L;
// the version where repository state ids were introduced
- private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0_UNRELEASED;
+ private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0;
@Override
public boolean equals(Object o) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
index f09e1dd9cd..133c20411f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
+++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.block;
+import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -43,18 +44,22 @@ public class ClusterBlock implements Streamable, ToXContent {
private boolean disableStatePersistence = false;
+ private boolean allowReleaseResources;
+
private RestStatus status;
ClusterBlock() {
}
- public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, RestStatus status, EnumSet<ClusterBlockLevel> levels) {
+ public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, RestStatus status,
+ EnumSet<ClusterBlockLevel> levels) {
this.id = id;
this.description = description;
this.retryable = retryable;
this.disableStatePersistence = disableStatePersistence;
this.status = status;
this.levels = levels;
+ this.allowReleaseResources = allowReleaseResources;
}
public int id() {
@@ -127,12 +132,17 @@ public class ClusterBlock implements Streamable, ToXContent {
final int len = in.readVInt();
ArrayList<ClusterBlockLevel> levels = new ArrayList<>(len);
for (int i = 0; i < len; i++) {
- levels.add(ClusterBlockLevel.fromId(in.readVInt()));
+ levels.add(in.readEnum(ClusterBlockLevel.class));
}
this.levels = EnumSet.copyOf(levels);
retryable = in.readBoolean();
disableStatePersistence = in.readBoolean();
status = RestStatus.readFrom(in);
+ if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+ allowReleaseResources = in.readBoolean();
+ } else {
+ allowReleaseResources = false;
+ }
}
@Override
@@ -141,11 +151,14 @@ public class ClusterBlock implements Streamable, ToXContent {
out.writeString(description);
out.writeVInt(levels.size());
for (ClusterBlockLevel level : levels) {
- out.writeVInt(level.id());
+ out.writeEnum(level);
}
out.writeBoolean(retryable);
out.writeBoolean(disableStatePersistence);
RestStatus.writeTo(out, status);
+ if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+ out.writeBoolean(allowReleaseResources);
+ }
}
@Override
@@ -176,4 +189,8 @@ public class ClusterBlock implements Streamable, ToXContent {
public int hashCode() {
return id;
}
+
+ public boolean isAllowReleaseResources() {
+ return allowReleaseResources;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
index 9d39d410d0..177de711a6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
+++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java
@@ -23,34 +23,11 @@ package org.elasticsearch.cluster.block;
import java.util.EnumSet;
public enum ClusterBlockLevel {
- READ(0),
- WRITE(1),
- METADATA_READ(2),
- METADATA_WRITE(3);
+ READ,
+ WRITE,
+ METADATA_READ,
+ METADATA_WRITE;
- public static final EnumSet<ClusterBlockLevel> ALL = EnumSet.of(READ, WRITE, METADATA_READ, METADATA_WRITE);
+ public static final EnumSet<ClusterBlockLevel> ALL = EnumSet.allOf(ClusterBlockLevel.class);
public static final EnumSet<ClusterBlockLevel> READ_WRITE = EnumSet.of(READ, WRITE);
-
- private final int id;
-
- ClusterBlockLevel(int id) {
- this.id = id;
- }
-
- public int id() {
- return this.id;
- }
-
- static ClusterBlockLevel fromId(int id) {
- if (id == 0) {
- return READ;
- } else if (id == 1) {
- return WRITE;
- } else if (id == 2) {
- return METADATA_READ;
- } else if (id == 3) {
- return METADATA_WRITE;
- }
- throw new IllegalArgumentException("No cluster block level matching [" + id + "]");
- }
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
index 2bdf560580..9e05d50831 100644
--- a/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
+++ b/core/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
@@ -70,11 +70,11 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
}
public Set<ClusterBlock> global(ClusterBlockLevel level) {
- return levelHolders[level.id()].global();
+ return levelHolders[level.ordinal()].global();
}
public ImmutableOpenMap<String, Set<ClusterBlock>> indices(ClusterBlockLevel level) {
- return levelHolders[level.id()].indices();
+ return levelHolders[level.ordinal()].indices();
}
private Set<ClusterBlock> blocksForIndex(ClusterBlockLevel level, String index) {
@@ -97,7 +97,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
.collect(toSet())));
}
- levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
+ levelHolders[level.ordinal()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
}
return levelHolders;
}
@@ -203,6 +203,26 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet())));
}
+ /**
+ * Returns <code>null</code> iff none of the given indices have a {@link ClusterBlockLevel#METADATA_WRITE} block in place where
+ * {@link ClusterBlock#isAllowReleaseResources()} returns <code>false</code>; otherwise returns the offending blocks. This is used
+ * in places where resources will be released, like the deletion of an index, to free up resources on nodes.
+ * @param indices the indices to check
+ */
+ public ClusterBlockException indicesAllowReleaseResources(String[] indices) {
+ final Function<String, Stream<ClusterBlock>> blocksForIndexAtLevel = index ->
+ blocksForIndex(ClusterBlockLevel.METADATA_WRITE, index).stream();
+ Stream<ClusterBlock> blocks = concat(
+ global(ClusterBlockLevel.METADATA_WRITE).stream(),
+ Stream.of(indices).flatMap(blocksForIndexAtLevel)).filter(clusterBlock -> clusterBlock.isAllowReleaseResources() == false);
+ Set<ClusterBlock> clusterBlocks = unmodifiableSet(blocks.collect(toSet()));
+ if (clusterBlocks.isEmpty()) {
+ return null;
+ }
+ return new ClusterBlockException(clusterBlocks);
+ }
+
+
@Override
public String toString() {
if (global.isEmpty() && indices().isEmpty()) {
@@ -270,8 +290,6 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
static class ImmutableLevelHolder {
- static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(emptySet(), ImmutableOpenMap.of());
-
private final Set<ClusterBlock> global;
private final ImmutableOpenMap<String, Set<ClusterBlock>> indices;
@@ -314,30 +332,31 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
}
public Builder addBlocks(IndexMetaData indexMetaData) {
+ String indexName = indexMetaData.getIndex().getName();
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
- addIndexBlock(indexMetaData.getIndex().getName(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+ addIndexBlock(indexName, MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
}
if (IndexMetaData.INDEX_READ_ONLY_SETTING.get(indexMetaData.getSettings())) {
- addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
+ addIndexBlock(indexName, IndexMetaData.INDEX_READ_ONLY_BLOCK);
}
if (IndexMetaData.INDEX_BLOCKS_READ_SETTING.get(indexMetaData.getSettings())) {
- addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_BLOCK);
+ addIndexBlock(indexName, IndexMetaData.INDEX_READ_BLOCK);
}
if (IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(indexMetaData.getSettings())) {
- addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_WRITE_BLOCK);
+ addIndexBlock(indexName, IndexMetaData.INDEX_WRITE_BLOCK);
}
if (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.get(indexMetaData.getSettings())) {
- addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_METADATA_BLOCK);
+ addIndexBlock(indexName, IndexMetaData.INDEX_METADATA_BLOCK);
+ }
+ if (IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetaData.getSettings())) {
+ addIndexBlock(indexName, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
}
return this;
}
public Builder updateBlocks(IndexMetaData indexMetaData) {
- removeIndexBlock(indexMetaData.getIndex().getName(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
- removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
- removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_BLOCK);
- removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_WRITE_BLOCK);
- removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_METADATA_BLOCK);
+ // let's remove all blocks for this index and add them back -- no need to remove all individual blocks....
+ indices.remove(indexMetaData.getIndex().getName());
return addBlocks(indexMetaData);
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index f1f6f8aee2..591b83c0ef 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -131,10 +131,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
return proto;
}
- public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
- public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ));
- public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
- public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));
+ public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
+ public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ));
+ public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
+ public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));
+ public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(12, "index read-only / allow delete (api)", false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE));
public enum State {
OPEN((byte) 0),
@@ -212,6 +213,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING =
Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope);
+ public static final String SETTING_READ_ONLY_ALLOW_DELETE = "index.blocks.read_only_allow_delete";
+ public static final Setting<Boolean> INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING =
+ Setting.boolSetting(SETTING_READ_ONLY_ALLOW_DELETE, false, Property.Dynamic, Property.IndexScope);
+
public static final String SETTING_VERSION_CREATED = "index.version.created";
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index a13e5e21e5..e47585356a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.CollectionUtil;
-import org.elasticsearch.Version;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;
@@ -119,7 +118,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope);
- public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
+ public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false,
+ false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
+
+ public static final Setting<Boolean> SETTING_READ_ONLY_ALLOW_DELETE_SETTING =
+ Setting.boolSetting("cluster.blocks.read_only_allow_delete", false, Property.Dynamic, Property.NodeScope);
+
+ public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(13, "cluster read-only / allow delete (api)",
+ false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
public static final MetaData EMPTY_META_DATA = builder().build();
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index 2a2c6c65b9..7f8a176243 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -54,7 +54,7 @@ import java.util.Set;
*/
public class MetaDataIndexStateService extends AbstractComponent {
- public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
+ public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
private final ClusterService clusterService;
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
index b4b1fc8051..653edcb9e8 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java
@@ -230,6 +230,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings);
+ maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings);
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
index caafb82c65..a651d957a9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -34,7 +34,6 @@ import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
@@ -222,7 +221,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
this.ephemeralId = in.readString().intern();
this.hostName = in.readString().intern();
this.hostAddress = in.readString().intern();
- if (in.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
+ if (in.getVersion().after(Version.V_5_0_2)) {
this.address = new TransportAddress(in);
} else {
// we need to do this to preserve the host information during pinging and joining of a master. Since the version of the
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
index 3fcfdc0872..1c9aad322c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java
@@ -47,6 +47,9 @@ public class DiscoveryNodeFilters {
Map<String, String> settingsMap = settings.getAsMap();
for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
String propertyKey = entry.getKey();
+ if (entry.getValue() == null) {
+ continue; // this setting gets reset
+ }
if ("_ip".equals(propertyKey) || "_host_ip".equals(propertyKey) || "_publish_ip".equals(propertyKey)) {
for (String value : Strings.tokenizeToStringArray(entry.getValue(), ",")) {
if (InetAddresses.isInetAddress(value) == false) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
index 3740ded306..0d3fe2df92 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/NodeAllocationResult.java
@@ -80,7 +80,7 @@ public class NodeAllocationResult implements ToXContent, Writeable, Comparable<N
public NodeAllocationResult(StreamInput in) throws IOException {
node = new DiscoveryNode(in);
shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new);
- if (in.getVersion().before(Version.V_5_2_1_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_2_1)) {
canAllocateDecision = Decision.readFrom(in);
} else {
canAllocateDecision = in.readOptionalWriteable(Decision::readFrom);
@@ -93,7 +93,7 @@ public class NodeAllocationResult implements ToXContent, Writeable, Comparable<N
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeOptionalWriteable(shardStoreInfo);
- if (out.getVersion().before(Version.V_5_2_1_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_2_1)) {
if (canAllocateDecision == null) {
Decision.NO.writeTo(out);
} else {
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 0b3f4d4cbc..e79d46bb55 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -187,6 +187,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
+ MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 1b3d2f249b..9fcafcea3b 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -74,6 +74,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexMetaData.INDEX_BLOCKS_READ_SETTING,
IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
+ IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING,
IndexMetaData.INDEX_PRIORITY_SETTING,
IndexMetaData.INDEX_DATA_PATH_SETTING,
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
index b6c93e389f..5ed7f40fad 100644
--- a/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
+++ b/core/src/main/java/org/elasticsearch/common/transport/TransportAddress.java
@@ -74,8 +74,8 @@ public final class TransportAddress implements Writeable {
}
/**
- * Read from a stream and use the {@code hostString} when creating the InetAddress if the input comes from a version prior
- * {@link Version#V_5_0_3_UNRELEASED} as the hostString was not serialized
+ * Read from a stream and use the {@code hostString} when creating the InetAddress if the input comes from a version on or prior
+ * {@link Version#V_5_0_2} as the hostString was not serialized
*/
public TransportAddress(StreamInput in, @Nullable String hostString) throws IOException {
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // bwc layer for 5.x where we had more than one transport address
@@ -88,7 +88,7 @@ public final class TransportAddress implements Writeable {
final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a);
final InetAddress inetAddress;
- if (in.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
+ if (in.getVersion().after(Version.V_5_0_2)) {
String host = in.readString(); // the host string was serialized so we can ignore the passed in version
inetAddress = InetAddress.getByAddress(host, a);
} else {
@@ -107,7 +107,7 @@ public final class TransportAddress implements Writeable {
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length);
- if (out.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
+ if (out.getVersion().after(Version.V_5_0_2)) {
out.writeString(address.getHostString());
}
// don't serialize scope ids over the network!!!!
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
index 3a5e3b4dab..b37a6e14f0 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java
@@ -22,6 +22,7 @@ package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;
import java.util.Arrays;
@@ -79,6 +80,33 @@ public class EsExecutors {
return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy(), contextHolder);
}
+ /**
+ * Return a new executor that will automatically adjust the queue size based on queue throughput.
+ *
+ * @param size number of fixed threads to use for executing tasks
+ * @param initialQueueCapacity initial size of the executor queue
+ * @param minQueueSize minimum queue size that the queue can be adjusted to
+ * @param maxQueueSize maximum queue size that the queue can be adjusted to
+ * @param frameSize number of tasks during which stats are collected before adjusting queue size
+ */
+ public static EsThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize,
+ int maxQueueSize, int frameSize, TimeValue targetedResponseTime,
+ ThreadFactory threadFactory, ThreadContext contextHolder) {
+ if (initialQueueCapacity == minQueueSize && initialQueueCapacity == maxQueueSize) {
+ return newFixed(name, size, initialQueueCapacity, threadFactory, contextHolder);
+ }
+
+ if (initialQueueCapacity <= 0) {
+ throw new IllegalArgumentException("initial queue capacity for [" + name + "] executor must be positive, got: " +
+ initialQueueCapacity);
+ }
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity);
+ return new QueueResizingEsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
+ queue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime, threadFactory,
+ new EsAbortPolicy(), contextHolder);
+ }
+
private static final ExecutorService DIRECT_EXECUTOR_SERVICE = new AbstractExecutorService() {
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
index 9662292cf6..a1ac182b8d 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java
@@ -37,7 +37,7 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
/**
* Name used in error reporting.
*/
- private final String name;
+ protected final String name;
EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java
new file mode 100644
index 0000000000..854dc86231
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ResizableBlockingQueue;
+
+import java.util.Locale;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Stream;
+
+/**
+ * An extension to thread pool executor, which automatically adjusts the queue size of the
+ * {@code ResizableBlockingQueue} according to Little's Law.
+ */
+public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecutor {
+
+ private static final Logger logger =
+ ESLoggerFactory.getLogger(QueueResizingEsThreadPoolExecutor.class);
+
+ private final Function<Runnable, Runnable> runnableWrapper;
+ private final ResizableBlockingQueue<Runnable> workQueue;
+ private final int tasksPerFrame;
+ private final int minQueueSize;
+ private final int maxQueueSize;
+ private final long targetedResponseTimeNanos;
+ // The amount the queue size is adjusted by for each calculation
+ private static final int QUEUE_ADJUSTMENT_AMOUNT = 50;
+
+ private final AtomicLong totalTaskNanos = new AtomicLong(0);
+ private final AtomicInteger taskCount = new AtomicInteger(0);
+
+ private long startNs;
+
+ QueueResizingEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
+ ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize,
+ Function<Runnable, Runnable> runnableWrapper, final int tasksPerFrame,
+ TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
+ ThreadContext contextHolder) {
+ super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit,
+ workQueue, threadFactory, handler, contextHolder);
+ this.runnableWrapper = runnableWrapper;
+ this.workQueue = workQueue;
+ this.tasksPerFrame = tasksPerFrame;
+ this.startNs = System.nanoTime();
+ this.minQueueSize = minQueueSize;
+ this.maxQueueSize = maxQueueSize;
+ this.targetedResponseTimeNanos = targetedResponseTime.getNanos();
+ logger.debug("thread pool [{}] will adjust queue by [{}] when determining automatic queue size",
+ name, QUEUE_ADJUSTMENT_AMOUNT);
+ }
+
+ @Override
+ protected void doExecute(final Runnable command) {
+ // we are submitting a task, it has not yet started running (because super.execute() has not
+ // been called), but it could be immediately run, or run at a later time. We need the time
+ // this task entered the queue, which we get by creating a TimedRunnable, which starts the
+ // clock as soon as it is created.
+ super.doExecute(this.runnableWrapper.apply(command));
+ }
+
+ /**
+ * Calculate the task rate (λ) for a fixed number of tasks and the time it took those tasks to be measured
+ *
+ * @param totalNumberOfTasks total number of tasks that were measured
+ * @param totalFrameFrameNanos nanoseconds during which the tasks were received
+ * @return the rate of tasks in the system
+ */
+ static double calculateLambda(final int totalNumberOfTasks, final long totalFrameFrameNanos) {
+ assert totalFrameFrameNanos > 0 : "cannot calculate for instantaneous tasks";
+ assert totalNumberOfTasks > 0 : "cannot calculate for no tasks";
+ // There is no set execution time, instead we adjust the time window based on the
+ // number of completed tasks, so there is no background thread required to update the
+ // queue size at a regular interval. This means we need to calculate our λ by the
+ // total runtime, rather than a fixed interval.
+
+ // λ = total tasks divided by measurement time
+ return (double) totalNumberOfTasks / totalFrameFrameNanos;
+ }
+
+ /**
+ * Calculate Little's Law (L), which is the "optimal" queue size for a particular task rate (lambda) and targeted response time.
+ *
+ * @param lambda the arrival rate of tasks in nanoseconds
+ * @param targetedResponseTimeNanos nanoseconds for the average targeted response rate of requests
+ * @return the optimal queue size for the given task rate and targeted response time
+ */
+ static int calculateL(final double lambda, final long targetedResponseTimeNanos) {
+ assert targetedResponseTimeNanos > 0 : "cannot calculate for instantaneous requests";
+ // L = λ * W
+ return Math.toIntExact((long)(lambda * targetedResponseTimeNanos));
+ }
+
+ /**
+ * Returns the current queue capacity
+ */
+ public int getCurrentCapacity() {
+ return workQueue.capacity();
+ }
+
+ @Override
+ protected void afterExecute(Runnable r, Throwable t) {
+ super.afterExecute(r, t);
+ // A task has been completed, it has left the building. We should now be able to get the
+ // total time as a combination of the time in the queue and time spent running the task. We
+ // only want runnables that did not throw errors though, because they could be fast-failures
+ // that throw off our timings, so only check when t is null.
+ assert r instanceof TimedRunnable : "expected only TimedRunnables in queue";
+ final long taskNanos = ((TimedRunnable) r).getTotalNanos();
+ final long totalNanos = totalTaskNanos.addAndGet(taskNanos);
+ if (taskCount.incrementAndGet() == this.tasksPerFrame) {
+ final long endTimeNs = System.nanoTime();
+ final long totalRuntime = endTimeNs - this.startNs;
+ // Reset the start time for all tasks. At first glance this appears to need to be
+ // volatile, since we are reading from a different thread when it is set, but it
+ // is protected by the taskCount memory barrier.
+ // See: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
+ startNs = endTimeNs;
+
+ // Calculate the new desired queue size
+ try {
+ final double lambda = calculateLambda(tasksPerFrame, totalNanos);
+ final int desiredQueueSize = calculateL(lambda, targetedResponseTimeNanos);
+ if (logger.isDebugEnabled()) {
+ final long avgTaskTime = totalNanos / tasksPerFrame;
+ logger.debug("[{}]: there were [{}] tasks in [{}], avg task time: [{}], [{} tasks/s], " +
+ "optimal queue is [{}]",
+ name,
+ tasksPerFrame,
+ TimeValue.timeValueNanos(totalRuntime),
+ TimeValue.timeValueNanos(avgTaskTime),
+ String.format(Locale.ROOT, "%.2f", lambda * TimeValue.timeValueSeconds(1).nanos()),
+ desiredQueueSize);
+ }
+
+ final int oldCapacity = workQueue.capacity();
+
+ // Adjust the queue size towards the desired capacity using an adjust of
+ // QUEUE_ADJUSTMENT_AMOUNT (either up or down), keeping in mind the min and max
+ // values the queue size can have.
+ final int newCapacity =
+ workQueue.adjustCapacity(desiredQueueSize, QUEUE_ADJUSTMENT_AMOUNT, minQueueSize, maxQueueSize);
+ if (oldCapacity != newCapacity && logger.isDebugEnabled()) {
+ logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", name,
+ newCapacity > oldCapacity ? QUEUE_ADJUSTMENT_AMOUNT : -QUEUE_ADJUSTMENT_AMOUNT,
+ oldCapacity, newCapacity);
+ }
+ } catch (ArithmeticException e) {
+ // There was an integer overflow, so just log about it, rather than adjust the queue size
+ logger.warn((Supplier<?>) () -> new ParameterizedMessage(
+ "failed to calculate optimal queue size for [{}] thread pool, " +
+ "total frame time [{}ns], tasks [{}], task execution time [{}ns]",
+ name, totalRuntime, tasksPerFrame, totalNanos),
+ e);
+ } finally {
+ // Finally, decrement the task count and time back to their starting values. We
+ // do this at the end so there is no concurrent adjustments happening. We also
+ // decrement them instead of resetting them back to zero, as resetting them back
+ // to zero causes operations that came in during the adjustment to be uncounted
+ int tasks = taskCount.addAndGet(-this.tasksPerFrame);
+ assert tasks >= 0 : "tasks should never be negative, got: " + tasks;
+
+ if (tasks >= this.tasksPerFrame) {
+ // Start over, because we can potentially reach a "never adjusting" state,
+ //
+ // consider the following:
+ // - If the frame window is 10, and there are 10 tasks, then an adjustment will begin. (taskCount == 10)
+ // - Prior to the adjustment being done, 15 more tasks come in, the taskCount is now 25
+ // - Adjustment happens and we decrement the tasks by 10, taskCount is now 15
+ // - Since taskCount will now be incremented forever, it will never be 10 again,
+ // so there will be no further adjustments
+ logger.debug("[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", name);
+ totalTaskNanos.getAndSet(0);
+ taskCount.getAndSet(0);
+ startNs = System.nanoTime();
+ } else {
+ // Do a regular adjustment
+ totalTaskNanos.addAndGet(-totalNanos);
+ }
+ }
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder b = new StringBuilder();
+ b.append(getClass().getSimpleName()).append('[');
+ b.append(name).append(", ");
+
+ @SuppressWarnings("rawtypes")
+ ResizableBlockingQueue queue = (ResizableBlockingQueue) getQueue();
+
+ b.append("queue capacity = ").append(getCurrentCapacity()).append(", ");
+ b.append("min queue capacity = ").append(minQueueSize).append(", ");
+ b.append("max queue capacity = ").append(maxQueueSize).append(", ");
+ b.append("frame size = ").append(tasksPerFrame).append(", ");
+ b.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
+ b.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
+ /*
+ * ThreadPoolExecutor has some nice information in its toString but we
+ * can't get at it easily without just getting the toString.
+ */
+ b.append(super.toString()).append(']');
+ return b.toString();
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueue.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueue.java
new file mode 100644
index 0000000000..ca6f6030bb
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueue.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.concurrent.BlockingQueue;
+import org.elasticsearch.common.SuppressForbidden;
+
+/**
+ * Extends the {@code SizeBlockingQueue} to add the {@code adjustCapacity} method, which will adjust
+ * the capacity by a certain amount towards a maximum or minimum.
+ */
+final class ResizableBlockingQueue<E> extends SizeBlockingQueue<E> {
+
+ private volatile int capacity;
+
+ ResizableBlockingQueue(BlockingQueue<E> queue, int initialCapacity) {
+ super(queue, initialCapacity);
+ this.capacity = initialCapacity;
+ }
+
+ @SuppressForbidden(reason = "optimalCapacity is non-negative, therefore the difference cannot be < -Integer.MAX_VALUE")
+ private int getChangeAmount(int optimalCapacity) {
+ assert optimalCapacity >= 0 : "optimal capacity should always be positive, got: " + optimalCapacity;
+ return Math.abs(optimalCapacity - this.capacity);
+ }
+
+ @Override
+ public int capacity() {
+ return this.capacity;
+ }
+
+ @Override
+ public int remainingCapacity() {
+ return Math.max(0, this.capacity());
+ }
+
+ /** Resize the limit for the queue, returning the new size limit */
+ public synchronized int adjustCapacity(int optimalCapacity, int adjustmentAmount, int minCapacity, int maxCapacity) {
+ assert adjustmentAmount > 0 : "adjustment amount should be a positive value";
+ assert optimalCapacity >= 0 : "desired capacity cannot be negative";
+ assert minCapacity >= 0 : "cannot have min capacity smaller than 0";
+ assert maxCapacity >= minCapacity : "cannot have max capacity smaller than min capacity";
+
+ if (optimalCapacity == capacity) {
+ // Yahtzee!
+ return this.capacity;
+ }
+
+ if (optimalCapacity > capacity + adjustmentAmount) {
+ // adjust up
+ final int newCapacity = Math.min(maxCapacity, capacity + adjustmentAmount);
+ this.capacity = newCapacity;
+ return newCapacity;
+ } else if (optimalCapacity < capacity - adjustmentAmount) {
+ // adjust down
+ final int newCapacity = Math.max(minCapacity, capacity - adjustmentAmount);
+ this.capacity = newCapacity;
+ return newCapacity;
+ } else {
+ return this.capacity;
+ }
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java
index bff4ee613e..c4142152cc 100644
--- a/core/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java
@@ -131,7 +131,7 @@ public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQu
@Override
public boolean offer(E e) {
int count = size.incrementAndGet();
- if (count > capacity) {
+ if (count > capacity()) {
size.decrementAndGet();
return false;
}
@@ -168,7 +168,7 @@ public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQu
@Override
public int remainingCapacity() {
- return capacity - size.get();
+ return capacity() - size.get();
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java
new file mode 100644
index 0000000000..91ad6e46ef
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+/**
+ * A class used to wrap a {@code Runnable} in order to capture the time elapsed from the task's
+ * creation until the end of its execution.
+ */
+class TimedRunnable implements Runnable {
+ private final Runnable original;
+ private final long creationTimeNanos;
+ private long finishTimeNanos = -1;
+
+ TimedRunnable(Runnable original) {
+ this.original = original;
+ this.creationTimeNanos = System.nanoTime();
+ }
+
+ @Override
+ public void run() {
+ try {
+ original.run();
+ } finally {
+ finishTimeNanos = System.nanoTime();
+ }
+ }
+
+ /**
+ * Return the time from this task's creation until it finished running.
+ * If the task is still running or has not yet been run, returns -1.
+ */
+ long getTotalNanos() {
+ if (finishTimeNanos == -1) {
+ // The task has not finished (not yet run, still running, or threw), so the total time is unknown (-1)
+ return -1;
+ }
+ return finishTimeNanos - creationTimeNanos;
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
index 6f5a6c9a74..e9a83678f8 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
@@ -37,8 +37,8 @@ import java.util.EnumSet;
public class DiscoverySettings extends AbstractComponent {
public static final int NO_MASTER_BLOCK_ID = 2;
- public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
- public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
+ public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+ public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
/**
* sets the timeout for a complete publishing cycle, including both sending and committing. the master
* will continue to process the next cluster state update after this time has elapsed
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index d798f8b774..99a51adf96 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -223,7 +223,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
final String name = stateFile.getFileName().toString();
if (name.startsWith("metadata-")) {
throw new IllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before "
- + Version.CURRENT.minimumCompatibilityVersion()
+ + Version.CURRENT.minimumIndexCompatibilityVersion()
+ " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath());
}
}
@@ -294,7 +294,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation, "shards-*")) {
for (Path stateFile : stream) {
throw new IllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before "
- + Version.CURRENT.minimumCompatibilityVersion()
+ + Version.CURRENT.minimumIndexCompatibilityVersion()
+ " first to upgrade state structures - shard state found: [" + stateFile.getParent().toAbsolutePath());
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
index 6e884af3b8..6b61e03443 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -65,7 +65,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);
- public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+ public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);
@@ -246,9 +246,14 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
// automatically generate a UID for the metadata if we need to
metaDataBuilder.generateClusterUuidIfNeeded();
- if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
+ if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings())
+ || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
+ if (MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(recoveredState.metaData().settings())
+ || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(currentState.metaData().settings())) {
+ blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ }
for (IndexMetaData indexMetaData : recoveredState.metaData()) {
metaDataBuilder.put(indexMetaData, false);
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index 94b5782a22..cf41ad8ec1 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -146,9 +146,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService,
mapperRegistry,
// we parse all percolator queries as they would be parsed on shard 0
- () -> newQueryShardContext(0, null, () -> {
- throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp");
- }));
+ () -> newQueryShardContext(0, null, System::currentTimeMillis));
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService);
if (indexSettings.getIndexSortConfig().hasIndexSort()) {
// we delay the actual creation of the sort order for this index because the mapping has not been merged yet.
diff --git a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
index b6eb84b03b..768f7e7fd0 100644
--- a/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java
@@ -213,7 +213,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
name = in.readOptionalString();
nestedPath = in.readOptionalString();
parentChildType = in.readOptionalString();
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
ignoreUnmapped = in.readBoolean();
}
from = in.readVInt();
@@ -254,7 +254,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
out.writeOptionalString(name);
out.writeOptionalString(nestedPath);
out.writeOptionalString(parentChildType);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeBoolean(ignoreUnmapped);
}
out.writeVInt(from);
diff --git a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
index 553adf88b7..7889dee26d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java
@@ -228,7 +228,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
type = in.readOptionalString();
if (in.readBoolean()) {
doc = (BytesReference) in.readGenericValue();
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(doc);
@@ -250,7 +250,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
out.writeBoolean(doc != null);
if (doc != null) {
out.writeGenericValue(doc);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
} else {
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
index fd6f33e27b..0269249e82 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java
@@ -212,11 +212,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
autoGeneratePhraseQueries = in.readBoolean();
allowLeadingWildcard = in.readOptionalBoolean();
analyzeWildcard = in.readOptionalBoolean();
- if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_1_1)) {
in.readBoolean(); // lowercase_expanded_terms
}
enablePositionIncrements = in.readBoolean();
- if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_1_1)) {
in.readString(); // locale
}
fuzziness = new Fuzziness(in);
@@ -232,7 +232,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
timeZone = in.readOptionalTimeZone();
escape = in.readBoolean();
maxDeterminizedStates = in.readVInt();
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
splitOnWhitespace = in.readBoolean();
useAllFields = in.readOptionalBoolean();
} else {
@@ -256,11 +256,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
out.writeBoolean(this.autoGeneratePhraseQueries);
out.writeOptionalBoolean(this.allowLeadingWildcard);
out.writeOptionalBoolean(this.analyzeWildcard);
- if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_1_1)) {
out.writeBoolean(true); // lowercase_expanded_terms
}
out.writeBoolean(this.enablePositionIncrements);
- if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_1_1)) {
out.writeString(Locale.ROOT.toLanguageTag()); // locale
}
this.fuzziness.writeTo(out);
@@ -276,7 +276,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
out.writeOptionalTimeZone(timeZone);
out.writeBoolean(this.escape);
out.writeVInt(this.maxDeterminizedStates);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeBoolean(this.splitOnWhitespace);
out.writeOptionalBoolean(this.useAllFields);
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
index bc1bd4bea9..22c17e4ddb 100644
--- a/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java
@@ -112,7 +112,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
if (formatString != null) {
format = Joda.forPattern(formatString);
}
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
String relationString = in.readOptionalString();
if (relationString != null) {
relation = ShapeRelation.getRelationByName(relationString);
@@ -133,7 +133,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
formatString = this.format.format();
}
out.writeOptionalString(formatString);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
String relationString = null;
if (this.relation != null) {
relationString = this.relation.getRelationName();
diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
index 8312d56c3f..2efa4e815a 100644
--- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java
@@ -157,19 +157,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
flags = in.readInt();
analyzer = in.readOptionalString();
defaultOperator = Operator.readFromStream(in);
- if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_1_1)) {
in.readBoolean(); // lowercase_expanded_terms
}
settings.lenient(in.readBoolean());
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
this.lenientSet = in.readBoolean();
}
settings.analyzeWildcard(in.readBoolean());
- if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_1_1)) {
in.readString(); // locale
}
minimumShouldMatch = in.readOptionalString();
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
settings.quoteFieldSuffix(in.readOptionalString());
useAllFields = in.readOptionalBoolean();
}
@@ -186,19 +186,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
out.writeInt(flags);
out.writeOptionalString(analyzer);
defaultOperator.writeTo(out);
- if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_1_1)) {
out.writeBoolean(true); // lowercase_expanded_terms
}
out.writeBoolean(settings.lenient());
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeBoolean(lenientSet);
}
out.writeBoolean(settings.analyzeWildcard());
- if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().before(Version.V_5_1_1)) {
out.writeString(Locale.ROOT.toLanguageTag()); // locale
}
out.writeOptionalString(minimumShouldMatch);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalString(settings.quoteFieldSuffix());
out.writeOptionalBoolean(useAllFields);
}
diff --git a/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java
index 8fc16ae1b1..729bb47802 100644
--- a/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java
+++ b/core/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java
@@ -19,7 +19,6 @@
package org.elasticsearch.index.refresh;
-import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -106,20 +105,14 @@ public class RefreshStats implements Streamable, ToXContent {
public void readFrom(StreamInput in) throws IOException {
total = in.readVLong();
totalTimeInMillis = in.readVLong();
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
- listeners = in.readVInt();
- } else {
- listeners = 0;
- }
+ listeners = in.readVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeVLong(totalTimeInMillis);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
- out.writeVInt(listeners);
- }
+ out.writeVInt(listeners);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
index a582248af1..5bfae5fde9 100644
--- a/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
+++ b/core/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java
@@ -402,7 +402,7 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
retryBackoffInitialTime = new TimeValue(in);
maxRetries = in.readVInt();
requestsPerSecond = in.readFloat();
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
slices = in.readVInt();
} else {
slices = 1;
@@ -421,12 +421,12 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
retryBackoffInitialTime.writeTo(out);
out.writeVInt(maxRetries);
out.writeFloat(requestsPerSecond);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeVInt(slices);
} else {
if (slices > 1) {
throw new IllegalArgumentException("Attempting to send sliced reindex-style request to a node that doesn't support "
- + "it. Version is [" + out.getVersion() + "] but must be [" + Version.V_5_1_1_UNRELEASED + "]");
+ + "it. Version is [" + out.getVersion() + "] but must be [" + Version.V_5_1_1 + "]");
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
index 18c6dac920..284fea7a38 100644
--- a/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
+++ b/core/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
@@ -189,7 +189,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
}
public Status(StreamInput in) throws IOException {
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
sliceId = in.readOptionalVInt();
} else {
sliceId = null;
@@ -207,7 +207,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
requestsPerSecond = in.readFloat();
reasonCancelled = in.readOptionalString();
throttledUntil = new TimeValue(in);
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new));
} else {
sliceStatuses = emptyList();
@@ -216,7 +216,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalVInt(sliceId);
}
out.writeVLong(total);
@@ -232,7 +232,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
out.writeFloat(requestsPerSecond);
out.writeOptionalString(reasonCancelled);
throttledUntil.writeTo(out);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeVInt(sliceStatuses.size());
for (StatusOrException sliceStatus : sliceStatuses) {
out.writeOptionalWriteable(sliceStatus);
diff --git a/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java
index 878a9c61e4..105afcc95b 100644
--- a/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java
+++ b/core/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java
@@ -90,7 +90,7 @@ public class RemoteInfo implements Writeable {
headers.put(in.readString(), in.readString());
}
this.headers = unmodifiableMap(headers);
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
socketTimeout = new TimeValue(in);
connectTimeout = new TimeValue(in);
} else {
@@ -112,7 +112,7 @@ public class RemoteInfo implements Writeable {
out.writeString(header.getKey());
out.writeString(header.getValue());
}
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
socketTimeout.writeTo(out);
connectTimeout.writeTo(out);
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 84c3ee2ede..8da19d7cac 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -805,10 +805,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
final boolean force = request.force();
logger.trace("flush with {}", request);
/*
- * We allow flushes while recovery since we allow operations to happen while recovering and
- * we want to keep the translog under control (up to deletes, which we do not GC). Yet, we
- * do not use flush internally to clear deletes and flush the index writer since we use
- * Engine#writeIndexingBuffer for this now.
+ * We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under
+ * control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer
+ * since we use Engine#writeIndexingBuffer for this now.
*/
verifyNotClosed();
final Engine engine = getEngine();
@@ -1316,8 +1315,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
/**
- * Tests whether or not the translog should be flushed. This test is based on the current size
- * of the translog comparted to the configured flush threshold size.
+ * Tests whether or not the translog should be flushed. This test is based on the current size of the translog compared to the
+ * configured flush threshold size.
*
* @return {@code true} if the translog should be flushed
*/
@@ -1335,9 +1334,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
/**
- * Tests whether or not the translog generation should be rolled to a new generation. This test
- * is based on the size of the current generation compared to the configured generation
- * threshold size.
+ * Tests whether or not the translog generation should be rolled to a new generation. This test is based on the size of the current
+ * generation compared to the configured generation threshold size.
*
* @return {@code true} if the current generation should be rolled to a new generation
*/
@@ -1919,21 +1917,19 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private final AtomicBoolean flushOrRollRunning = new AtomicBoolean();
/**
- * Schedules a flush or translog generation roll if needed but will not schedule more than one
- * concurrently. The operation will be executed asynchronously on the flush thread pool.
+ * Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
+ * executed asynchronously on the flush thread pool.
*/
public void afterWriteOperation() {
if (shouldFlush() || shouldRollTranslogGeneration()) {
if (flushOrRollRunning.compareAndSet(false, true)) {
/*
- * We have to check again since otherwise there is a race when a thread passes the
- * first check next to another thread which performs the operation quickly enough to
- * finish before the current thread could flip the flag. In that situation, we have
- * an extra operation.
+ * We have to check again since otherwise there is a race when a thread passes the first check next to another thread which
+ * performs the operation quickly enough to finish before the current thread could flip the flag. In that situation, we
+ * have an extra operation.
*
- * Additionally, a flush implicitly executes a translog generation roll so if we
- * execute a flush then we do not need to check if we should roll the translog
- * generation.
+ * Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to
+ * check if we should roll the translog generation.
*/
if (shouldFlush()) {
logger.debug("submitting async flush request");
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
index ffe80a0f5f..d49edb33eb 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
@@ -278,22 +278,6 @@ public final class AnalysisModule {
* version uses a set of English stop words that are in
* lucene-analyzers-common so "stop" is defined in the analysis-common
* module. */
-
- // Add token filters declared in PreBuiltTokenFilters until they have all been migrated
- for (PreBuiltTokenFilters preBuilt : PreBuiltTokenFilters.values()) {
- switch (preBuilt) {
- case LOWERCASE:
- // This has been migrated but has to stick around until PreBuiltTokenizers is removed.
- continue;
- default:
- if (CachingStrategy.ONE != preBuilt.getCachingStrategy()) {
- throw new UnsupportedOperationException("shim not available for " + preBuilt.getCachingStrategy());
- }
- String name = preBuilt.name().toLowerCase(Locale.ROOT);
- preConfiguredTokenFilters.register(name, PreConfiguredTokenFilter.singleton(name, preBuilt.isMultiTermAware(),
- tokenStream -> preBuilt.create(tokenStream, Version.CURRENT)));
- }
- }
for (AnalysisPlugin plugin: plugins) {
for (PreConfiguredTokenFilter filter : plugin.getPreConfiguredTokenFilters()) {
diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
index 427c0431fb..ba66c41e63 100644
--- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
+++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java
@@ -20,38 +20,10 @@ package org.elasticsearch.indices.analysis;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
-import org.apache.lucene.analysis.ar.ArabicStemFilter;
-import org.apache.lucene.analysis.br.BrazilianStemFilter;
-import org.apache.lucene.analysis.cjk.CJKBigramFilter;
-import org.apache.lucene.analysis.cjk.CJKWidthFilter;
-import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
-import org.apache.lucene.analysis.core.DecimalDigitFilter;
-import org.apache.lucene.analysis.cz.CzechStemFilter;
-import org.apache.lucene.analysis.de.GermanNormalizationFilter;
-import org.apache.lucene.analysis.de.GermanStemFilter;
-import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
-import org.apache.lucene.analysis.fr.FrenchAnalyzer;
-import org.apache.lucene.analysis.hi.HindiNormalizationFilter;
-import org.apache.lucene.analysis.in.IndicNormalizationFilter;
-import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
-import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
-import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
-import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter;
-import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
-import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
-import org.apache.lucene.analysis.shingle.ShingleFilter;
-import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.tr.ApostropheFilter;
-import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.Version;
-import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
-import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
-import org.tartarus.snowball.ext.DutchStemmer;
-import org.tartarus.snowball.ext.FrenchStemmer;
import java.util.Locale;
@@ -66,229 +38,7 @@ public enum PreBuiltTokenFilters {
protected boolean isMultiTermAware() {
return true;
}
- },
-
- // Extended Token Filters
- ELISION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ElisionFilter(tokenStream, FrenchAnalyzer.DEFAULT_ARTICLES);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- ARABIC_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ArabicStemFilter(tokenStream);
- }
- },
-
- BRAZILIAN_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new BrazilianStemFilter(tokenStream);
- }
- },
-
- CZECH_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new CzechStemFilter(tokenStream);
- }
- },
-
- DUTCH_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new SnowballFilter(tokenStream, new DutchStemmer());
- }
- },
-
- FRENCH_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new SnowballFilter(tokenStream, new FrenchStemmer());
- }
- },
-
- GERMAN_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new GermanStemFilter(tokenStream);
- }
- },
-
- RUSSIAN_STEM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new SnowballFilter(tokenStream, "Russian");
- }
- },
-
- KEYWORD_REPEAT(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new KeywordRepeatFilter(tokenStream);
- }
- },
-
- ARABIC_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ArabicNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- PERSIAN_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new PersianNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- TYPE_AS_PAYLOAD(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new TypeAsPayloadTokenFilter(tokenStream);
- }
- },
-
- SHINGLE(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ShingleFilter(tokenStream);
- }
- },
-
- GERMAN_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new GermanNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- HINDI_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new HindiNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- INDIC_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new IndicNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- SORANI_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new SoraniNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- SCANDINAVIAN_NORMALIZATION(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ScandinavianNormalizationFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- SCANDINAVIAN_FOLDING(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ScandinavianFoldingFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- APOSTROPHE(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new ApostropheFilter(tokenStream);
- }
- },
-
- CJK_WIDTH(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new CJKWidthFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- DECIMAL_DIGIT(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new DecimalDigitFilter(tokenStream);
- }
- @Override
- protected boolean isMultiTermAware() {
- return true;
- }
- },
-
- CJK_BIGRAM(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new CJKBigramFilter(tokenStream);
- }
- },
-
- DELIMITED_PAYLOAD_FILTER(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new DelimitedPayloadTokenFilter(tokenStream, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER);
- }
- },
-
- LIMIT(CachingStrategy.ONE) {
- @Override
- public TokenStream create(TokenStream tokenStream, Version version) {
- return new LimitTokenCountFilter(tokenStream, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS);
- }
- },
-
- ;
+ };
protected boolean isMultiTermAware() {
return false;
diff --git a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
index d58d76fb10..1d7ba958f1 100644
--- a/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
+++ b/core/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
@@ -118,7 +118,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
}
public static PipelineConfiguration readFrom(StreamInput in) throws IOException {
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
return new PipelineConfiguration(in.readString(), in.readBytesReference(), XContentType.readFrom(in));
} else {
final String id = in.readString();
@@ -135,7 +135,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeBytesReference(config);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
index d01b1f9b43..c3783c600e 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsStats.java
@@ -52,7 +52,7 @@ public class OsStats implements Writeable, ToXContent {
this.cpu = new Cpu(in);
this.mem = new Mem(in);
this.swap = new Swap(in);
- if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
this.cgroup = in.readOptionalWriteable(Cgroup::new);
} else {
this.cgroup = null;
@@ -65,7 +65,7 @@ public class OsStats implements Writeable, ToXContent {
cpu.writeTo(out);
mem.writeTo(out);
swap.writeTo(out);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalWriteable(cgroup);
}
}
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
index 943f9018e6..666cc22b92 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java
@@ -81,7 +81,7 @@ public class PluginInfo implements Writeable, ToXContent {
this.description = in.readString();
this.version = in.readString();
this.classname = in.readString();
- if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
hasNativeController = in.readBoolean();
} else {
hasNativeController = false;
@@ -94,7 +94,7 @@ public class PluginInfo implements Writeable, ToXContent {
out.writeString(description);
out.writeString(version);
out.writeString(classname);
- if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
out.writeBoolean(hasNativeController);
}
}
diff --git a/core/src/main/java/org/elasticsearch/rest/RestStatus.java b/core/src/main/java/org/elasticsearch/rest/RestStatus.java
index d72eb2d11f..e7c07f2114 100644
--- a/core/src/main/java/org/elasticsearch/rest/RestStatus.java
+++ b/core/src/main/java/org/elasticsearch/rest/RestStatus.java
@@ -479,7 +479,7 @@ public enum RestStatus {
* is considered to be temporary. If the request that received this status code was the result of a user action,
* the request MUST NOT be repeated until it is requested by a separate user action.
*/
- INSUFFICIENT_STORAGE(506);
+ INSUFFICIENT_STORAGE(507);
private static final Map<Integer, RestStatus> CODE_TO_STATUS;
static {
diff --git a/core/src/main/java/org/elasticsearch/script/AbstractSearchScript.java b/core/src/main/java/org/elasticsearch/script/AbstractSearchScript.java
index 929046d4dd..50f90892a7 100644
--- a/core/src/main/java/org/elasticsearch/script/AbstractSearchScript.java
+++ b/core/src/main/java/org/elasticsearch/script/AbstractSearchScript.java
@@ -23,7 +23,6 @@ import org.apache.lucene.search.Scorer;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.search.lookup.LeafDocLookup;
import org.elasticsearch.search.lookup.LeafFieldsLookup;
-import org.elasticsearch.search.lookup.LeafIndexLookup;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SourceLookup;
@@ -88,13 +87,6 @@ public abstract class AbstractSearchScript extends AbstractExecutableScript impl
}
/**
- * Allows to access statistics on terms and fields.
- */
- protected final LeafIndexLookup indexLookup() {
- return lookup.indexLookup();
- }
-
- /**
* Allows to access the *stored* fields.
*/
protected final LeafFieldsLookup fields() {
diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngine.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngine.java
index 7bd2f62e15..1b3fb23a7f 100644
--- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngine.java
+++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngine.java
@@ -19,9 +19,12 @@
package org.elasticsearch.script;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.lookup.SearchLookup;
@@ -41,6 +44,11 @@ public class NativeScriptEngine extends AbstractComponent implements ScriptEngin
public NativeScriptEngine(Settings settings, Map<String, NativeScriptFactory> scripts) {
super(settings);
+ if (scripts.isEmpty() == false) {
+ Logger logger = Loggers.getLogger(ScriptModule.class);
+ DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
+ deprecationLogger.deprecated("Native scripts are deprecated. Use a custom ScriptEngine to write scripts in java.");
+ }
this.scripts = unmodifiableMap(scripts);
}
diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptFactory.java b/core/src/main/java/org/elasticsearch/script/NativeScriptFactory.java
index 7fca250190..a53f711582 100644
--- a/core/src/main/java/org/elasticsearch/script/NativeScriptFactory.java
+++ b/core/src/main/java/org/elasticsearch/script/NativeScriptFactory.java
@@ -31,7 +31,9 @@ import java.util.Map;
* @see AbstractSearchScript
* @see AbstractLongSearchScript
* @see AbstractDoubleSearchScript
+ * @deprecated Create a {@link ScriptEngine} instead of using native scripts
*/
+@Deprecated
public interface NativeScriptFactory {
/**
diff --git a/core/src/main/java/org/elasticsearch/script/Script.java b/core/src/main/java/org/elasticsearch/script/Script.java
index 9f8a774398..7397ecfd89 100644
--- a/core/src/main/java/org/elasticsearch/script/Script.java
+++ b/core/src/main/java/org/elasticsearch/script/Script.java
@@ -485,7 +485,7 @@ public final class Script implements ToXContentObject, Writeable {
public Script(StreamInput in) throws IOException {
// Version 5.3 allows lang to be an optional parameter for stored scripts and expects
// options to be null for stored and file scripts.
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
this.type = ScriptType.readFrom(in);
this.lang = in.readOptionalString();
this.idOrCode = in.readString();
@@ -496,7 +496,7 @@ public final class Script implements ToXContentObject, Writeable {
// Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential
// for more options than just XContentType. Reorders the read in contents to be in
// same order as the constructor.
- } else if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ } else if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
this.type = ScriptType.readFrom(in);
this.lang = in.readString();
@@ -554,7 +554,7 @@ public final class Script implements ToXContentObject, Writeable {
public void writeTo(StreamOutput out) throws IOException {
// Version 5.3+ allows lang to be an optional parameter for stored scripts and expects
// options to be null for stored and file scripts.
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
type.writeTo(out);
out.writeOptionalString(lang);
out.writeString(idOrCode);
@@ -565,7 +565,7 @@ public final class Script implements ToXContentObject, Writeable {
// Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential
// for more options than just XContentType. Reorders the written out contents to be in
// same order as the constructor.
- } else if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ } else if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
type.writeTo(out);
if (lang == null) {
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
index 87afc21a8c..f69302ce01 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptMetaData.java
@@ -329,7 +329,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
// Split the id to find the language then use StoredScriptSource to parse the
// expected BytesReference after which a new StoredScriptSource is created
// with the appropriate language and options.
- if (in.getVersion().before(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().before(Version.V_5_3_0)) {
int split = id.indexOf('#');
if (split == -1) {
@@ -353,7 +353,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
public void writeTo(StreamOutput out) throws IOException {
// Version 5.3+ will output the contents of the scripts' Map using
// StoredScriptSource to stored the language, code, and options.
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeVInt(scripts.size());
for (Map.Entry<String, StoredScriptSource> entry : scripts.entrySet()) {
diff --git a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
index b4e6e257eb..7236e6eab3 100644
--- a/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
+++ b/core/src/main/java/org/elasticsearch/script/StoredScriptSource.java
@@ -365,7 +365,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
* only the code parameter will be read in as a bytes reference.
*/
public StoredScriptSource(StreamInput in) throws IOException {
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
this.lang = in.readString();
this.code = in.readString();
@SuppressWarnings("unchecked")
@@ -385,7 +385,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
*/
@Override
public void writeTo(StreamOutput out) throws IOException {
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeString(lang);
out.writeString(code);
@SuppressWarnings("unchecked")
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
index 5692b34c57..105dbbc545 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java
@@ -140,6 +140,9 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder<RangeAggregati
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
// We need to call processRanges here so they are parsed before we make the decision of whether to cache the request
Range[] ranges = processRanges(context, config);
+ if (ranges.length == 0) {
+ throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
+ }
return new RangeAggregatorFactory(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
metaData);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java
index de5622299c..2c686fbb97 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeAggregationBuilder.java
@@ -283,9 +283,12 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder<DateRangeA
@Override
protected DateRangeAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
- // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make
+ // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make
// the decision of whether to cache the request
Range[] ranges = processRanges(context, config);
+ if (ranges.length == 0) {
+ throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
+ }
return new DateRangeAggregatorFactory(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
metaData);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java
index 9278a0b73b..1484fae8d4 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregationBuilder.java
@@ -384,6 +384,9 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde
ValuesSourceConfig<ValuesSource.GeoPoint> config, AggregatorFactory<?> parent, Builder subFactoriesBuilder)
throws IOException {
Range[] ranges = this.ranges.toArray(new Range[this.range().size()]);
+ if (ranges.length == 0) {
+ throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
+ }
return new GeoDistanceRangeAggregatorFactory(name, config, origin, ranges, unit, distanceType, keyed, context, parent,
subFactoriesBuilder, metaData);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
index cb03ef7251..c530ecbfa9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ip/IpRangeAggregationBuilder.java
@@ -369,6 +369,9 @@ public final class IpRangeAggregationBuilder
AggregatorFactory<?> parent, Builder subFactoriesBuilder)
throws IOException {
List<BinaryRangeAggregator.Range> ranges = new ArrayList<>();
+ if (this.ranges.size() == 0) {
+ throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
+ }
for (Range range : this.ranges) {
ranges.add(new BinaryRangeAggregator.Range(range.key, toBytesRef(range.from), toBytesRef(range.to)));
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
index aabe5f585d..46e371a3df 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
@@ -64,8 +64,8 @@ public class IncludeExclude implements Writeable, ToXContent {
public static final ParseField PARTITION_FIELD = new ParseField("partition");
public static final ParseField NUM_PARTITIONS_FIELD = new ParseField("num_partitions");
// Needed to add this seed for a deterministic term hashing policy
- // otherwise tests fail to get expected results and worse, shards
- // can disagree on which terms hash to the required partition.
+ // otherwise tests fail to get expected results and worse, shards
+ // can disagree on which terms hash to the required partition.
private static final int HASH_PARTITIONING_SEED = 31;
// for parsing purposes only
@@ -427,7 +427,7 @@ public class IncludeExclude implements Writeable, ToXContent {
} else {
excludeValues = null;
}
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
incNumPartitions = in.readVInt();
incZeroBasedPartition = in.readVInt();
} else {
@@ -460,7 +460,7 @@ public class IncludeExclude implements Writeable, ToXContent {
out.writeBytesRef(value);
}
}
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeVInt(incNumPartitions);
out.writeVInt(incZeroBasedPartition);
}
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 37d7eb5b02..98c4f11059 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -220,7 +220,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
profile = in.readBoolean();
searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
sliceBuilder = in.readOptionalWriteable(SliceBuilder::new);
- if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
collapse = in.readOptionalWriteable(CollapseBuilder::new);
}
}
@@ -271,7 +271,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
out.writeBoolean(profile);
out.writeOptionalWriteable(searchAfterBuilder);
out.writeOptionalWriteable(sliceBuilder);
- if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
out.writeOptionalWriteable(collapse);
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
index 3a3c1cfd66..e5db6639ad 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
@@ -128,14 +128,14 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
order(in.readOptionalWriteable(Order::readFromStream));
highlightFilter(in.readOptionalBoolean());
forceSource(in.readOptionalBoolean());
- if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream));
}
boundaryMaxScan(in.readOptionalVInt());
if (in.readBoolean()) {
boundaryChars(in.readString().toCharArray());
}
- if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
if (in.readBoolean()) {
boundaryScannerLocale(in.readString());
}
@@ -167,7 +167,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
out.writeOptionalWriteable(order);
out.writeOptionalBoolean(highlightFilter);
out.writeOptionalBoolean(forceSource);
- if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
out.writeOptionalWriteable(boundaryScannerType);
}
out.writeOptionalVInt(boundaryMaxScan);
@@ -176,7 +176,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (hasBounaryChars) {
out.writeString(String.valueOf(boundaryChars));
}
- if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
boolean hasBoundaryScannerLocale = boundaryScannerLocale != null;
out.writeBoolean(hasBoundaryScannerLocale);
if (hasBoundaryScannerLocale) {
diff --git a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java
index 46fba77627..7c6bea8ce9 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java
@@ -54,7 +54,7 @@ public final class AliasFilter implements Writeable {
public AliasFilter(StreamInput input) throws IOException {
aliases = input.readStringArray();
- if (input.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (input.getVersion().onOrAfter(Version.V_5_1_1)) {
filter = input.readOptionalNamedWriteable(QueryBuilder.class);
reparseAliases = false;
} else {
@@ -90,7 +90,7 @@ public final class AliasFilter implements Writeable {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(aliases);
- if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalNamedWriteable(filter);
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
index d21fc2faf5..d193812aba 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
@@ -181,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
source = in.readOptionalWriteable(SearchSourceBuilder::new);
types = in.readStringArray();
aliasFilter = new AliasFilter(in);
- if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
indexBoost = in.readFloat();
} else {
// Nodes < 5.2.0 doesn't send index boost. Read it from source.
@@ -209,7 +209,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
out.writeOptionalWriteable(source);
out.writeStringArray(types);
aliasFilter.writeTo(out);
- if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+ if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeFloat(indexBoost);
}
if (!asKey) {
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java b/core/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java
deleted file mode 100644
index 82604b1e0d..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/CachedPositionIterator.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.IntsRefBuilder;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-/*
- * Can iterate over the positions of a term an arbitrary number of times.
- * */
-public class CachedPositionIterator extends PositionIterator {
-
- public CachedPositionIterator(IndexFieldTerm indexFieldTerm) {
- super(indexFieldTerm);
- }
-
- // all payloads of the term in the current document in one bytes array.
- // payloadStarts and payloadLength mark the start and end of one payload.
- final BytesRefBuilder payloads = new BytesRefBuilder();
-
- final IntsRefBuilder payloadsLengths = new IntsRefBuilder();
-
- final IntsRefBuilder payloadsStarts = new IntsRefBuilder();
-
- final IntsRefBuilder positions = new IntsRefBuilder();
-
- final IntsRefBuilder startOffsets = new IntsRefBuilder();
-
- final IntsRefBuilder endOffsets = new IntsRefBuilder();
-
- final BytesRef payload = new BytesRef();
-
- @Override
- public Iterator<TermPosition> reset() {
- return new Iterator<TermPosition>() {
- private int pos = 0;
- private final TermPosition termPosition = new TermPosition();
-
- @Override
- public boolean hasNext() {
- return pos < freq;
- }
-
- @Override
- public TermPosition next() {
- termPosition.position = positions.intAt(pos);
- termPosition.startOffset = startOffsets.intAt(pos);
- termPosition.endOffset = endOffsets.intAt(pos);
- termPosition.payload = payload;
- payload.bytes = payloads.bytes();
- payload.offset = payloadsStarts.intAt(pos);
- payload.length = payloadsLengths.intAt(pos);
- pos++;
- return termPosition;
- }
-
- @Override
- public void remove() {
- }
- };
- }
-
-
- private void record() throws IOException {
- TermPosition termPosition;
- for (int i = 0; i < freq; i++) {
- termPosition = super.next();
- positions.setIntAt(i, termPosition.position);
- addPayload(i, termPosition.payload);
- startOffsets.setIntAt(i, termPosition.startOffset);
- endOffsets.setIntAt(i, termPosition.endOffset);
- }
- }
- private void ensureSize(int freq) {
- if (freq == 0) {
- return;
- }
- startOffsets.grow(freq);
- endOffsets.grow(freq);
- positions.grow(freq);
- payloadsLengths.grow(freq);
- payloadsStarts.grow(freq);
- payloads.grow(freq * 8);// this is just a guess....
-
- }
-
- private void addPayload(int i, BytesRef currPayload) {
- if (currPayload != null) {
- payloadsLengths.setIntAt(i, currPayload.length);
- payloadsStarts.setIntAt(i, i == 0 ? 0 : payloadsStarts.intAt(i - 1) + payloadsLengths.intAt(i - 1));
- payloads.grow(payloadsStarts.intAt(i) + currPayload.length);
- System.arraycopy(currPayload.bytes, currPayload.offset, payloads.bytes(), payloadsStarts.intAt(i), currPayload.length);
- } else {
- payloadsLengths.setIntAt(i, 0);
- payloadsStarts.setIntAt(i, i == 0 ? 0 : payloadsStarts.intAt(i - 1) + payloadsLengths.intAt(i - 1));
- }
- }
-
-
- @Override
- public void nextDoc() throws IOException {
- super.nextDoc();
- ensureSize(freq);
- record();
- }
-
- @Override
- public TermPosition next() {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/IndexField.java b/core/src/main/java/org/elasticsearch/search/lookup/IndexField.java
deleted file mode 100644
index 21803983d2..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/IndexField.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.search.CollectionStatistics;
-import org.elasticsearch.common.util.MinimalMap;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Script interface to all information regarding a field.
- * */
-public class IndexField extends MinimalMap<String, IndexFieldTerm> {
-
- /*
- * TermsInfo Objects that represent the Terms are stored in this map when
- * requested. Information such as frequency, doc frequency and positions
- * information can be retrieved from the TermInfo objects in this map.
- */
- private final Map<String, IndexFieldTerm> terms = new HashMap<>();
-
- // the name of this field
- private final String fieldName;
-
- /*
- * The holds the current reader. We need it to populate the field
- * statistics. We just delegate all requests there
- */
- private final LeafIndexLookup indexLookup;
-
- /*
- * General field statistics such as number of documents containing the
- * field.
- */
- private final CollectionStatistics fieldStats;
-
- /*
- * Represents a field in a document. Can be used to return information on
- * statistics of this field. Information on specific terms in this field can
- * be accessed by calling get(String term).
- */
- public IndexField(String fieldName, LeafIndexLookup indexLookup) throws IOException {
-
- assert fieldName != null;
- this.fieldName = fieldName;
-
- assert indexLookup != null;
- this.indexLookup = indexLookup;
-
- fieldStats = this.indexLookup.getIndexSearcher().collectionStatistics(fieldName);
- }
-
- /* get number of documents containing the field */
- public long docCount() throws IOException {
- return fieldStats.docCount();
- }
-
- /* get sum of the number of words over all documents that were indexed */
- public long sumttf() throws IOException {
- return fieldStats.sumTotalTermFreq();
- }
-
- /*
- * get the sum of doc frequencies over all words that appear in any document
- * that has the field.
- */
- public long sumdf() throws IOException {
- return fieldStats.sumDocFreq();
- }
-
- // TODO: might be good to get the field lengths here somewhere?
-
- /*
- * Returns a TermInfo object that can be used to access information on
- * specific terms. flags can be set as described in TermInfo.
- *
- * TODO: here might be potential for running time improvement? If we knew in
- * advance which terms are requested, we could provide an array which the
- * user could then iterate over.
- */
- public IndexFieldTerm get(Object key, int flags) {
- String termString = (String) key;
- IndexFieldTerm indexFieldTerm = terms.get(termString);
- // see if we initialized already...
- if (indexFieldTerm == null) {
- indexFieldTerm = new IndexFieldTerm(termString, fieldName, indexLookup, flags);
- terms.put(termString, indexFieldTerm);
- }
- indexFieldTerm.validateFlags(flags);
- return indexFieldTerm;
- }
-
- /*
- * Returns a TermInfo object that can be used to access information on
- * specific terms. flags can be set as described in TermInfo.
- */
- @Override
- public IndexFieldTerm get(Object key) {
- // per default, do not initialize any positions info
- return get(key, IndexLookup.FLAG_FREQUENCIES);
- }
-
- public void setDocIdInTerms(int docId) {
- for (IndexFieldTerm ti : terms.values()) {
- ti.setDocument(docId);
- }
- }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java b/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java
deleted file mode 100644
index fbca4f435b..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.FilterLeafReader.FilterPostingsEnum;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchException;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-/**
- * Holds all information on a particular term in a field.
- * */
-public class IndexFieldTerm implements Iterable<TermPosition> {
-
- // The posting list for this term. Is null if the term or field does not
- // exist.
- PostingsEnum postings;
-
- // Stores if positions, offsets and payloads are requested.
- private final int flags;
-
- private final String fieldName;
-
- private final String term;
-
- private final PositionIterator iterator;
-
- // for lucene calls
- private final Term identifier;
-
- private final TermStatistics termStats;
-
- // get the document frequency of the term
- public long df() throws IOException {
- return termStats.docFreq();
- }
-
- // get the total term frequency of the term, that is, how often does the
- // term appear in any document?
- public long ttf() throws IOException {
- return termStats.totalTermFreq();
- }
-
- // when the reader changes, we have to get the posting list for this term
- // and reader
- private void setReader(LeafReader reader) {
- try {
- postings = getPostings(convertToLuceneFlags(flags), reader);
-
- if (postings == null) {
- // no term or field for this segment, fake out the postings...
- final DocIdSetIterator empty = DocIdSetIterator.empty();
- postings = new PostingsEnum() {
- @Override
- public int docID() {
- return empty.docID();
- }
-
- @Override
- public int nextDoc() throws IOException {
- return empty.nextDoc();
- }
-
- @Override
- public int advance(int target) throws IOException {
- return empty.advance(target);
- }
-
- @Override
- public long cost() {
- return empty.cost();
- }
-
- @Override
- public int freq() throws IOException {
- return 1;
- }
-
- @Override
- public int nextPosition() throws IOException {
- return -1;
- }
-
- @Override
- public int startOffset() throws IOException {
- return -1;
- }
-
- @Override
- public int endOffset() throws IOException {
- return -1;
- }
-
- @Override
- public BytesRef getPayload() throws IOException {
- return null;
- }
- };
- }
- } catch (IOException e) {
- throw new ElasticsearchException("Unable to get postings for field " + fieldName + " and term " + term, e);
- }
-
- }
-
- private int convertToLuceneFlags(int flags) {
- int lucenePositionsFlags = PostingsEnum.NONE;
- lucenePositionsFlags |= (flags & IndexLookup.FLAG_FREQUENCIES) > 0 ? PostingsEnum.FREQS : 0x0;
- lucenePositionsFlags |= (flags & IndexLookup.FLAG_POSITIONS) > 0 ? PostingsEnum.POSITIONS : 0x0;
- lucenePositionsFlags |= (flags & IndexLookup.FLAG_PAYLOADS) > 0 ? PostingsEnum.PAYLOADS : 0x0;
- lucenePositionsFlags |= (flags & IndexLookup.FLAG_OFFSETS) > 0 ? PostingsEnum.OFFSETS : 0x0;
- return lucenePositionsFlags;
- }
-
- private PostingsEnum getPostings(int luceneFlags, LeafReader reader) throws IOException {
- assert identifier.field() != null;
- assert identifier.bytes() != null;
- final Fields fields = reader.fields();
- PostingsEnum newPostings = null;
- if (fields != null) {
- final Terms terms = fields.terms(identifier.field());
- if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
- if (termsEnum.seekExact(identifier.bytes())) {
- newPostings = termsEnum.postings(postings, luceneFlags);
- final Bits liveDocs = reader.getLiveDocs();
- if (liveDocs != null) {
- newPostings = new FilterPostingsEnum(newPostings) {
- private int doNext(int d) throws IOException {
- for (; d != NO_MORE_DOCS; d = super.nextDoc()) {
- if (liveDocs.get(d)) {
- return d;
- }
- }
- return NO_MORE_DOCS;
- }
- @Override
- public int nextDoc() throws IOException {
- return doNext(super.nextDoc());
- }
- @Override
- public int advance(int target) throws IOException {
- return doNext(super.advance(target));
- }
- };
- }
- }
- }
- }
- return newPostings;
- }
-
- private int freq = 0;
-
- public void setDocument(int docId) {
- assert (postings != null);
- try {
- // we try to advance to the current document.
- int currentDocPos = postings.docID();
- if (currentDocPos < docId) {
- currentDocPos = postings.advance(docId);
- }
- if (currentDocPos == docId) {
- freq = postings.freq();
- } else {
- freq = 0;
- }
- iterator.nextDoc();
- } catch (IOException e) {
- throw new ElasticsearchException("While trying to initialize term positions in IndexFieldTerm.setNextDoc() ", e);
- }
- }
-
- public IndexFieldTerm(String term, String fieldName, LeafIndexLookup indexLookup, int flags) {
- assert fieldName != null;
- this.fieldName = fieldName;
- assert term != null;
- this.term = term;
- assert indexLookup != null;
- identifier = new Term(fieldName, (String) term);
- this.flags = flags;
- boolean doRecord = ((flags & IndexLookup.FLAG_CACHE) > 0);
- if (!doRecord) {
- iterator = new PositionIterator(this);
- } else {
- iterator = new CachedPositionIterator(this);
- }
- setReader(indexLookup.getReader());
- setDocument(indexLookup.getDocId());
- try {
- termStats = indexLookup.getIndexSearcher().termStatistics(identifier,
- TermContext.build(indexLookup.getReaderContext(), identifier));
- } catch (IOException e) {
- throw new ElasticsearchException("Cannot get term statistics: ", e);
- }
- }
-
- public int tf() throws IOException {
- return freq;
- }
-
- @Override
- public Iterator<TermPosition> iterator() {
- return iterator.reset();
- }
-
- /*
- * A user might decide inside a script to call get with _POSITIONS and then
- * a second time with _PAYLOADS. If the positions were recorded but the
- * payloads were not, the user will not have access to them. Therefore, throw
- * exception here explaining how to call get().
- */
- public void validateFlags(int flags2) {
- if ((this.flags & flags2) < flags2) {
- throw new ElasticsearchException("You must call get with all required flags! Instead of " + getCalledStatement(flags2)
- + "call " + getCallStatement(flags2 | this.flags) + " once");
- }
- }
-
- private String getCalledStatement(int flags2) {
- String calledFlagsCall1 = getFlagsString(flags);
- String calledFlagsCall2 = getFlagsString(flags2);
- String callStatement1 = getCallStatement(calledFlagsCall1);
- String callStatement2 = getCallStatement(calledFlagsCall2);
- return " " + callStatement1 + " and " + callStatement2 + " ";
- }
-
- private String getCallStatement(String calledFlags) {
- return "_index['" + this.fieldName + "'].get('" + this.term + "', " + calledFlags + ")";
- }
-
- private String getFlagsString(int flags2) {
- String flagsString = null;
- if ((flags2 & IndexLookup.FLAG_FREQUENCIES) != 0) {
- flagsString = anddToFlagsString(flagsString, "_FREQUENCIES");
- }
- if ((flags2 & IndexLookup.FLAG_POSITIONS) != 0) {
- flagsString = anddToFlagsString(flagsString, "_POSITIONS");
- }
- if ((flags2 & IndexLookup.FLAG_OFFSETS) != 0) {
- flagsString = anddToFlagsString(flagsString, "_OFFSETS");
- }
- if ((flags2 & IndexLookup.FLAG_PAYLOADS) != 0) {
- flagsString = anddToFlagsString(flagsString, "_PAYLOADS");
- }
- if ((flags2 & IndexLookup.FLAG_CACHE) != 0) {
- flagsString = anddToFlagsString(flagsString, "_CACHE");
- }
- return flagsString;
- }
-
- private String anddToFlagsString(String flagsString, String flag) {
- if (flagsString != null) {
- flagsString += " | ";
- } else {
- flagsString = "";
- }
- flagsString += flag;
- return flagsString;
- }
-
- private String getCallStatement(int flags2) {
- String calledFlags = getFlagsString(flags2);
- String callStatement = getCallStatement(calledFlags);
- return " " + callStatement + " ";
-
- }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java
deleted file mode 100644
index 485c690ef2..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/IndexLookup.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.index.LeafReaderContext;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import static java.util.Collections.unmodifiableMap;
-
-public class IndexLookup {
- public static final Map<String, Object> NAMES;
- static {
- Map<String, Object> names = new HashMap<>();
- names.put("_FREQUENCIES", IndexLookup.FLAG_FREQUENCIES);
- names.put("_POSITIONS", IndexLookup.FLAG_POSITIONS);
- names.put("_OFFSETS", IndexLookup.FLAG_OFFSETS);
- names.put("_PAYLOADS", IndexLookup.FLAG_PAYLOADS);
- names.put("_CACHE", IndexLookup.FLAG_CACHE);
- NAMES = unmodifiableMap(names);
- }
- /**
- * Flag to pass to {@link IndexField#get(Object, int)} if you require
- * offsets in the returned {@link IndexFieldTerm}.
- */
- public static final int FLAG_OFFSETS = 2;
-
- /**
- * Flag to pass to {@link IndexField#get(Object, int)} if you require
- * payloads in the returned {@link IndexFieldTerm}.
- */
- public static final int FLAG_PAYLOADS = 4;
-
- /**
- * Flag to pass to {@link IndexField#get(Object, int)} if you require
- * frequencies in the returned {@link IndexFieldTerm}. Frequencies might be
- * returned anyway for some lucene codecs even if this flag is no set.
- */
- public static final int FLAG_FREQUENCIES = 8;
-
- /**
- * Flag to pass to {@link IndexField#get(Object, int)} if you require
- * positions in the returned {@link IndexFieldTerm}.
- */
- public static final int FLAG_POSITIONS = 16;
-
- /**
- * Flag to pass to {@link IndexField#get(Object, int)} if you require
- * positions in the returned {@link IndexFieldTerm}.
- */
- public static final int FLAG_CACHE = 32;
-
- public static LeafIndexLookup getLeafIndexLookup(LeafReaderContext context) {
- return new LeafIndexLookup(context);
- }
-
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java
deleted file mode 100644
index 9908f2830f..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.search.lookup;
-
-import org.apache.logging.log4j.Logger;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.search.IndexSearcher;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.util.MinimalMap;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-public class LeafIndexLookup extends MinimalMap<String, IndexField> {
-
- // Current reader from which we can get the term vectors. No info on term
- // and field statistics.
- private final LeafReader reader;
-
- // The parent reader from which we can get proper field and term
- // statistics
- private final IndexReader parentReader;
-
- // we need this later to get the field and term statistics of the shard
- private final IndexSearcher indexSearcher;
-
- // current docId
- private int docId = -1;
-
- // stores the objects that are used in the script. we maintain this map
- // because we do not want to re-initialize the objects each time a field is
- // accessed
- private final Map<String, IndexField> indexFields = new HashMap<>();
-
- // number of documents per shard. cached here because the computation is
- // expensive
- private int numDocs = -1;
-
- // the maximum doc number of the shard.
- private int maxDoc = -1;
-
- // number of deleted documents per shard. cached here because the
- // computation is expensive
- private int numDeletedDocs = -1;
-
- private boolean deprecationEmitted = false;
-
- private void logDeprecation() {
- if (deprecationEmitted == false) {
- Logger logger = Loggers.getLogger(getClass());
- DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
- deprecationLogger.deprecated("Using _index is deprecated. Create a custom ScriptEngine to access index internals.");
- deprecationEmitted = true;
- }
- }
-
- public int numDocs() {
- logDeprecation();
- if (numDocs == -1) {
- numDocs = parentReader.numDocs();
- }
- return numDocs;
- }
-
- public int maxDoc() {
- logDeprecation();
- if (maxDoc == -1) {
- maxDoc = parentReader.maxDoc();
- }
- return maxDoc;
- }
-
- public int numDeletedDocs() {
- logDeprecation();
- if (numDeletedDocs == -1) {
- numDeletedDocs = parentReader.numDeletedDocs();
- }
- return numDeletedDocs;
- }
-
- public LeafIndexLookup(LeafReaderContext ctx) {
- reader = ctx.reader();
- parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
- indexSearcher = new IndexSearcher(parentReader);
- indexSearcher.setQueryCache(null);
- }
-
- public void setDocument(int docId) {
- if (this.docId == docId) { // if we are called with the same docId,
- // nothing to do
- return;
- }
- // We assume that docs are processed in ascending order of id. If this
- // is not the case, we would have to re initialize all posting lists in
- // IndexFieldTerm. TODO: Instead of assert we could also call
- // setReaderInFields(); here?
- if (this.docId > docId) {
- // This might happen if the same SearchLookup is used in different
- // phases, such as score and fetch phase.
- // In this case we do not want to re initialize posting list etc.
- // because we do not even know if term and field statistics will be
- // needed in this new phase.
- // Therefore we just remove all IndexFieldTerms.
- indexFields.clear();
- }
- this.docId = docId;
- setNextDocIdInFields();
- }
-
- protected void setNextDocIdInFields() {
- for (IndexField stat : indexFields.values()) {
- stat.setDocIdInTerms(this.docId);
- }
- }
-
- /*
- * TODO: here might be potential for running time improvement? If we knew in
- * advance which terms are requested, we could provide an array which the
- * user could then iterate over.
- */
- @Override
- public IndexField get(Object key) {
- logDeprecation();
- String stringField = (String) key;
- IndexField indexField = indexFields.get(key);
- if (indexField == null) {
- try {
- indexField = new IndexField(stringField, this);
- indexFields.put(stringField, indexField);
- } catch (IOException e) {
- throw new ElasticsearchException(e);
- }
- }
- return indexField;
- }
-
- /*
- * Get the lucene term vectors. See
- * https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html
- * *
- */
- public Fields termVectors() throws IOException {
- logDeprecation();
- assert reader != null;
- return reader.getTermVectors(docId);
- }
-
- LeafReader getReader() {
- logDeprecation();
- return reader;
- }
-
- public int getDocId() {
- logDeprecation();
- return docId;
- }
-
- public IndexReader getParentReader() {
- logDeprecation();
- if (parentReader == null) {
- return reader;
- }
- return parentReader;
- }
-
- public IndexSearcher getIndexSearcher() {
- logDeprecation();
- return indexSearcher;
- }
-
- public IndexReaderContext getReaderContext() {
- logDeprecation();
- return getParentReader().getContext();
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/LeafSearchLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/LeafSearchLookup.java
index 5e41154662..e76206d6cb 100644
--- a/core/src/main/java/org/elasticsearch/search/lookup/LeafSearchLookup.java
+++ b/core/src/main/java/org/elasticsearch/search/lookup/LeafSearchLookup.java
@@ -35,24 +35,20 @@ public class LeafSearchLookup {
final LeafDocLookup docMap;
final SourceLookup sourceLookup;
final LeafFieldsLookup fieldsLookup;
- final LeafIndexLookup indexLookup;
final Map<String, Object> asMap;
public LeafSearchLookup(LeafReaderContext ctx, LeafDocLookup docMap, SourceLookup sourceLookup,
- LeafFieldsLookup fieldsLookup, LeafIndexLookup indexLookup, Map<String, Object> topLevelMap) {
+ LeafFieldsLookup fieldsLookup) {
this.ctx = ctx;
this.docMap = docMap;
this.sourceLookup = sourceLookup;
this.fieldsLookup = fieldsLookup;
- this.indexLookup = indexLookup;
- Map<String, Object> asMap = new HashMap<>(topLevelMap.size() + 5);
- asMap.putAll(topLevelMap);
+ Map<String, Object> asMap = new HashMap<>(4);
asMap.put("doc", docMap);
asMap.put("_doc", docMap);
asMap.put("_source", sourceLookup);
asMap.put("_fields", fieldsLookup);
- asMap.put("_index", indexLookup);
this.asMap = unmodifiableMap(asMap);
}
@@ -64,10 +60,6 @@ public class LeafSearchLookup {
return this.sourceLookup;
}
- public LeafIndexLookup indexLookup() {
- return this.indexLookup;
- }
-
public LeafFieldsLookup fields() {
return this.fieldsLookup;
}
@@ -80,6 +72,5 @@ public class LeafSearchLookup {
docMap.setDocument(docId);
sourceLookup.setSegmentAndDocument(ctx, docId);
fieldsLookup.setDocument(docId);
- indexLookup.setDocument(docId);
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java b/core/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java
deleted file mode 100644
index c36a714894..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/PositionIterator.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.index.PostingsEnum;
-import org.elasticsearch.ElasticsearchException;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-public class PositionIterator implements Iterator<TermPosition> {
-
- private boolean resetted = false;
-
- protected IndexFieldTerm indexFieldTerm;
-
- protected int freq = -1;
-
- // current position of iterator
- private int currentPos;
-
- protected final TermPosition termPosition = new TermPosition();
-
- private PostingsEnum postings;
-
- public PositionIterator(IndexFieldTerm indexFieldTerm) {
- this.indexFieldTerm = indexFieldTerm;
- }
-
- @Override
- public void remove() {
- throw new UnsupportedOperationException("Cannot remove anything from TermPosition iterator.");
- }
-
- @Override
- public boolean hasNext() {
- return currentPos < freq;
- }
-
-
- @Override
- public TermPosition next() {
- try {
- termPosition.position = postings.nextPosition();
- termPosition.startOffset = postings.startOffset();
- termPosition.endOffset = postings.endOffset();
- termPosition.payload = postings.getPayload();
- } catch (IOException ex) {
- throw new ElasticsearchException("can not advance iterator", ex);
- }
- currentPos++;
- return termPosition;
- }
-
- public void nextDoc() throws IOException {
- resetted = false;
- currentPos = 0;
- freq = indexFieldTerm.tf();
- postings = indexFieldTerm.postings;
- }
-
- public Iterator<TermPosition> reset() {
- if (resetted) {
- throw new ElasticsearchException(
- "Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly.");
- }
- resetted = true;
- return this;
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
index aaa2baf62e..d2cee1109c 100644
--- a/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
+++ b/core/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java
@@ -42,9 +42,7 @@ public class SearchLookup {
return new LeafSearchLookup(context,
docMap.getLeafDocLookup(context),
sourceLookup,
- fieldsLookup.getLeafFieldsLookup(context),
- IndexLookup.getLeafIndexLookup(context),
- IndexLookup.NAMES);
+ fieldsLookup.getLeafFieldsLookup(context));
}
public DocLookup doc() {
diff --git a/core/src/main/java/org/elasticsearch/search/lookup/TermPosition.java b/core/src/main/java/org/elasticsearch/search/lookup/TermPosition.java
deleted file mode 100644
index 593452d385..0000000000
--- a/core/src/main/java/org/elasticsearch/search/lookup/TermPosition.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.lookup;
-
-import org.apache.lucene.analysis.payloads.PayloadHelper;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.CharsRefBuilder;
-
-public class TermPosition {
-
- public int position = -1;
- public int startOffset = -1;
- public int endOffset = -1;
- public BytesRef payload;
- private CharsRefBuilder spare = new CharsRefBuilder();
-
- public String payloadAsString() {
- if (payload != null && payload.length != 0) {
- spare.copyUTF8Bytes(payload);
- return spare.toString();
- } else {
- return null;
- }
- }
-
- public float payloadAsFloat(float defaultMissing) {
- if (payload != null && payload.length != 0) {
- return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
- } else {
- return defaultMissing;
- }
- }
-
- public int payloadAsInt(int defaultMissing) {
- if (payload != null && payload.length != 0) {
- return PayloadHelper.decodeInt(payload.bytes, payload.offset);
- } else {
- return defaultMissing;
- }
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
index 433a631821..37cce0ac60 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -69,7 +69,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
private static final String TOTAL_SHARDS = "total_shards";
private static final String SUCCESSFUL_SHARDS = "successful_shards";
- private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0_UNRELEASED;
+ private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0;
public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0_UNRELEASED;
private static final Comparator<SnapshotInfo> COMPARATOR =
diff --git a/core/src/main/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilder.java
new file mode 100644
index 0000000000..265e544d28
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilder.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.threadpool.ExecutorBuilder;
+import org.elasticsearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ThreadFactory;
+
+/**
+ * A builder for executors that automatically adjust the queue length as needed, depending on
+ * Little's Law. See https://en.wikipedia.org/wiki/Little's_law for more information.
+ */
+public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder<AutoQueueAdjustingExecutorBuilder.AutoExecutorSettings> {
+
+ private final Setting<Integer> sizeSetting;
+ private final Setting<Integer> queueSizeSetting;
+ private final Setting<Integer> minQueueSizeSetting;
+ private final Setting<Integer> maxQueueSizeSetting;
+ private final Setting<TimeValue> targetedResponseTimeSetting;
+ private final Setting<Integer> frameSizeSetting;
+
+ AutoQueueAdjustingExecutorBuilder(final Settings settings, final String name, final int size,
+ final int initialQueueSize, final int minQueueSize,
+ final int maxQueueSize, final int frameSize) {
+ super(name);
+ final String prefix = "thread_pool." + name;
+ final String sizeKey = settingsKey(prefix, "size");
+ this.sizeSetting =
+ new Setting<>(
+ sizeKey,
+ s -> Integer.toString(size),
+ s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey),
+ Setting.Property.NodeScope);
+ final String queueSizeKey = settingsKey(prefix, "queue_size");
+ final String minSizeKey = settingsKey(prefix, "min_queue_size");
+ final String maxSizeKey = settingsKey(prefix, "max_queue_size");
+ final String frameSizeKey = settingsKey(prefix, "auto_queue_frame_size");
+ final String targetedResponseTimeKey = settingsKey(prefix, "target_response_time");
+ this.targetedResponseTimeSetting = Setting.timeSetting(targetedResponseTimeKey, TimeValue.timeValueSeconds(1),
+ TimeValue.timeValueMillis(10), Setting.Property.NodeScope);
+ this.queueSizeSetting = Setting.intSetting(queueSizeKey, initialQueueSize, Setting.Property.NodeScope);
+ // These temp settings are used to validate the min and max settings below
+ Setting<Integer> tempMaxQueueSizeSetting = Setting.intSetting(maxSizeKey, maxQueueSize, Setting.Property.NodeScope);
+ Setting<Integer> tempMinQueueSizeSetting = Setting.intSetting(minSizeKey, minQueueSize, Setting.Property.NodeScope);
+
+ this.minQueueSizeSetting = new Setting<>(
+ minSizeKey,
+ (s) -> Integer.toString(minQueueSize),
+ (s) -> Setting.parseInt(s, 0, tempMaxQueueSizeSetting.get(settings), minSizeKey),
+ Setting.Property.NodeScope);
+ this.maxQueueSizeSetting = new Setting<>(
+ maxSizeKey,
+ (s) -> Integer.toString(maxQueueSize),
+ (s) -> Setting.parseInt(s, tempMinQueueSizeSetting.get(settings), Integer.MAX_VALUE, maxSizeKey),
+ Setting.Property.NodeScope);
+ this.frameSizeSetting = Setting.intSetting(frameSizeKey, frameSize, 100, Setting.Property.NodeScope);
+ }
+
+ @Override
+ public List<Setting<?>> getRegisteredSettings() {
+ return Arrays.asList(sizeSetting, queueSizeSetting, minQueueSizeSetting,
+ maxQueueSizeSetting, frameSizeSetting, targetedResponseTimeSetting);
+ }
+
+ @Override
+ AutoExecutorSettings getSettings(Settings settings) {
+ final String nodeName = Node.NODE_NAME_SETTING.get(settings);
+ final int size = sizeSetting.get(settings);
+ final int initialQueueSize = queueSizeSetting.get(settings);
+ final int minQueueSize = minQueueSizeSetting.get(settings);
+ final int maxQueueSize = maxQueueSizeSetting.get(settings);
+ final int frameSize = frameSizeSetting.get(settings);
+ final TimeValue targetedResponseTime = targetedResponseTimeSetting.get(settings);
+ return new AutoExecutorSettings(nodeName, size, initialQueueSize, minQueueSize, maxQueueSize, frameSize, targetedResponseTime);
+ }
+
+ @Override
+ ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings,
+ final ThreadContext threadContext) {
+ int size = settings.size;
+ int initialQueueSize = settings.initialQueueSize;
+ int minQueueSize = settings.minQueueSize;
+ int maxQueueSize = settings.maxQueueSize;
+ int frameSize = settings.frameSize;
+ TimeValue targetedResponseTime = settings.targetedResponseTime;
+ final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
+ final ExecutorService executor =
+ EsExecutors.newAutoQueueFixed(name(), size, initialQueueSize, minQueueSize,
+ maxQueueSize, frameSize, targetedResponseTime, threadFactory, threadContext);
+ // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type
+ final ThreadPool.Info info =
+ new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED_AUTO_QUEUE_SIZE,
+ size, size, null, new SizeValue(initialQueueSize));
+ return new ThreadPool.ExecutorHolder(executor, info);
+ }
+
+ @Override
+ String formatInfo(ThreadPool.Info info) {
+ return String.format(
+ Locale.ROOT,
+ "name [%s], size [%d], queue size [%s]",
+ info.getName(),
+ info.getMax(),
+ info.getQueueSize() == null ? "unbounded" : info.getQueueSize());
+ }
+
+ static final class AutoExecutorSettings extends ExecutorBuilder.ExecutorSettings {
+
+ private final int size;
+ private final int initialQueueSize;
+ private final int minQueueSize;
+ private final int maxQueueSize;
+ private final int frameSize;
+ private final TimeValue targetedResponseTime;
+
+ AutoExecutorSettings(final String nodeName, final int size, final int initialQueueSize,
+ final int minQueueSize, final int maxQueueSize, final int frameSize,
+ final TimeValue targetedResponseTime) {
+ super(nodeName);
+ this.size = size;
+ this.initialQueueSize = initialQueueSize;
+ this.minQueueSize = minQueueSize;
+ this.maxQueueSize = maxQueueSize;
+ this.frameSize = frameSize;
+ this.targetedResponseTime = targetedResponseTime;
+ }
+
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java
index 54f5ab0af3..314eb1df71 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java
@@ -21,6 +21,7 @@ package org.elasticsearch.threadpool;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import java.util.List;
@@ -46,6 +47,14 @@ public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings
return String.join(".", prefix, key);
}
+ protected int applyHardSizeLimit(final Settings settings, final String name) {
+ if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
+ return 1 + EsExecutors.numberOfProcessors(settings);
+ } else {
+ return Integer.MAX_VALUE;
+ }
+ }
+
/**
* The list of settings this builder will register.
*
diff --git a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java
index 162dcc80d7..9bf9569d1e 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java
@@ -76,14 +76,6 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope);
}
- private int applyHardSizeLimit(final Settings settings, final String name) {
- if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
- return 1 + EsExecutors.numberOfProcessors(settings);
- } else {
- return Integer.MAX_VALUE;
- }
- }
-
@Override
public List<Setting<?>> getRegisteredSettings() {
return Arrays.asList(sizeSetting, queueSizeSetting);
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index f72956c420..7b0c4eb752 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -23,6 +23,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -85,6 +86,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
public enum ThreadPoolType {
DIRECT("direct"),
FIXED("fixed"),
+ FIXED_AUTO_QUEUE_SIZE("fixed_auto_queue_size"),
SCALING("scaling");
private final String type;
@@ -126,7 +128,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
map.put(Names.GET, ThreadPoolType.FIXED);
map.put(Names.INDEX, ThreadPoolType.FIXED);
map.put(Names.BULK, ThreadPoolType.FIXED);
- map.put(Names.SEARCH, ThreadPoolType.FIXED);
+ map.put(Names.SEARCH, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE);
map.put(Names.MANAGEMENT, ThreadPoolType.SCALING);
map.put(Names.FLUSH, ThreadPoolType.SCALING);
map.put(Names.REFRESH, ThreadPoolType.SCALING);
@@ -171,7 +173,8 @@ public class ThreadPool extends AbstractComponent implements Closeable {
builders.put(Names.INDEX, new FixedExecutorBuilder(settings, Names.INDEX, availableProcessors, 200));
builders.put(Names.BULK, new FixedExecutorBuilder(settings, Names.BULK, availableProcessors, 200)); // now that we reuse bulk for index/delete ops
builders.put(Names.GET, new FixedExecutorBuilder(settings, Names.GET, availableProcessors, 1000));
- builders.put(Names.SEARCH, new FixedExecutorBuilder(settings, Names.SEARCH, searchThreadPoolSize(availableProcessors), 1000));
+ builders.put(Names.SEARCH, new AutoQueueAdjustingExecutorBuilder(settings,
+ Names.SEARCH, searchThreadPoolSize(availableProcessors), 1000, 1000, 1000, 2000));
builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5)));
// no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded
// the assumption here is that the listeners should be very lightweight on the listeners side
@@ -608,7 +611,13 @@ public class ThreadPool extends AbstractComponent implements Closeable {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
- out.writeString(type.getType());
+ if (type == ThreadPoolType.FIXED_AUTO_QUEUE_SIZE &&
+ out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+ // 5.x doesn't know about the "fixed_auto_queue_size" thread pool type, just write fixed.
+ out.writeString(ThreadPoolType.FIXED.getType());
+ } else {
+ out.writeString(type.getType());
+ }
out.writeInt(min);
out.writeInt(max);
out.writeOptionalWriteable(keepAlive);
diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
index ba355e4147..5713cc27c0 100644
--- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -1324,9 +1324,15 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}
streamIn = compressor.streamInput(streamIn);
}
- if (version.isCompatible(getCurrentVersion()) == false) {
+ // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version
+ // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the
+ // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility
+ // once the connection is established
+ final Version compatibilityVersion = TransportStatus.isHandshake(status) ? getCurrentVersion().minimumCompatibilityVersion()
+ : getCurrentVersion();
+ if (version.isCompatible(compatibilityVersion) == false) {
throw new IllegalStateException("Received message from unsupported version: [" + version
- + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]");
+ + "] minimal compatible version is: [" + compatibilityVersion.minimumCompatibilityVersion() + "]");
}
streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry);
streamIn.setVersion(version);
diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
index cec01732c4..a89fe23edb 100644
--- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java
+++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
@@ -61,7 +61,6 @@ import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.TransportSettings;
@@ -74,7 +73,6 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;
@@ -103,9 +101,9 @@ import static java.util.Collections.unmodifiableMap;
public class TribeService extends AbstractLifecycleComponent {
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false,
- RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
+ false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false,
- RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
+ false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
public static Settings processSettings(Settings settings) {
if (TRIBE_NAME_SETTING.exists(settings)) {
diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
index c0430001bb..106c24982a 100644
--- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java
@@ -972,7 +972,7 @@ public class ExceptionSerializationTests extends ESTestCase {
try (StreamInput in = decoded.streamInput()) {
//randomize the version across released and unreleased ones
Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
in.setVersion(version);
ElasticsearchException exception = new ElasticsearchException(in);
assertEquals("test message", exception.getMessage());
diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java
index 06a4fe117f..96a0c9aa81 100644
--- a/core/src/test/java/org/elasticsearch/VersionTests.java
+++ b/core/src/test/java/org/elasticsearch/VersionTests.java
@@ -33,7 +33,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
-import static org.elasticsearch.Version.V_5_3_0_UNRELEASED;
+import static org.elasticsearch.Version.V_5_3_0;
import static org.elasticsearch.Version.V_6_0_0_alpha2_UNRELEASED;
import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -46,30 +46,30 @@ import static org.hamcrest.Matchers.sameInstance;
public class VersionTests extends ESTestCase {
public void testVersionComparison() throws Exception {
- assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha2_UNRELEASED), is(true));
- assertThat(V_5_3_0_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));
- assertThat(V_6_0_0_alpha2_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));
+ assertThat(V_5_3_0.before(V_6_0_0_alpha2_UNRELEASED), is(true));
+ assertThat(V_5_3_0.before(V_5_3_0), is(false));
+ assertThat(V_6_0_0_alpha2_UNRELEASED.before(V_5_3_0), is(false));
- assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha2_UNRELEASED), is(true));
- assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(true));
- assertThat(V_6_0_0_alpha2_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false));
+ assertThat(V_5_3_0.onOrBefore(V_6_0_0_alpha2_UNRELEASED), is(true));
+ assertThat(V_5_3_0.onOrBefore(V_5_3_0), is(true));
+ assertThat(V_6_0_0_alpha2_UNRELEASED.onOrBefore(V_5_3_0), is(false));
- assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha2_UNRELEASED), is(false));
- assertThat(V_5_3_0_UNRELEASED.after(V_5_3_0_UNRELEASED), is(false));
- assertThat(V_6_0_0_alpha2_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true));
+ assertThat(V_5_3_0.after(V_6_0_0_alpha2_UNRELEASED), is(false));
+ assertThat(V_5_3_0.after(V_5_3_0), is(false));
+ assertThat(V_6_0_0_alpha2_UNRELEASED.after(V_5_3_0), is(true));
- assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha2_UNRELEASED), is(false));
- assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));
- assertThat(V_6_0_0_alpha2_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));
+ assertThat(V_5_3_0.onOrAfter(V_6_0_0_alpha2_UNRELEASED), is(false));
+ assertThat(V_5_3_0.onOrAfter(V_5_3_0), is(true));
+ assertThat(V_6_0_0_alpha2_UNRELEASED.onOrAfter(V_5_3_0), is(true));
assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1")));
assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2")));
assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24")));
assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0")));
- assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha2_UNRELEASED)));
- assertThat(V_5_3_0_UNRELEASED.compareTo(V_5_3_0_UNRELEASED), is(0));
- assertThat(V_6_0_0_alpha2_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED)));
+ assertThat(V_5_3_0, is(lessThan(V_6_0_0_alpha2_UNRELEASED)));
+ assertThat(V_5_3_0.compareTo(V_5_3_0), is(0));
+ assertThat(V_6_0_0_alpha2_UNRELEASED, is(greaterThan(V_5_3_0)));
}
public void testMin() {
@@ -100,7 +100,7 @@ public class VersionTests extends ESTestCase {
assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha2_UNRELEASED.minimumIndexCompatibilityVersion());
assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion());
assertEquals(Version.fromId(2000099),
- Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion());
+ Version.V_5_1_1.minimumIndexCompatibilityVersion());
assertEquals(Version.fromId(2000099),
Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion());
}
@@ -157,7 +157,7 @@ public class VersionTests extends ESTestCase {
public void testIndexCreatedVersion() {
// an actual index has a IndexMetaData.SETTING_INDEX_UUID
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2,
- Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha2_UNRELEASED);
+ Version.V_5_2_0, Version.V_6_0_0_alpha2_UNRELEASED);
assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
}
@@ -311,8 +311,8 @@ public class VersionTests extends ESTestCase {
}
if (other.isAlpha() == false && version.isAlpha() == false
&& other.major == version.major && other.minor == version.minor) {
- assertEquals(other.luceneVersion.major, version.luceneVersion.major);
- assertEquals(other.luceneVersion.minor, version.luceneVersion.minor);
+ assertEquals(version + " vs. " + other, other.luceneVersion.major, version.luceneVersion.major);
+ assertEquals(version + " vs. " + other, other.luceneVersion.minor, version.luceneVersion.minor);
// should we also assert the lucene bugfix version?
}
}
@@ -326,11 +326,12 @@ public class VersionTests extends ESTestCase {
public void testIsCompatible() {
assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
- assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha2_UNRELEASED));
+ assertTrue(isCompatible(Version.V_5_5_0_UNRELEASED, Version.V_6_0_0_alpha2_UNRELEASED));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2_UNRELEASED));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
}
+
public boolean isCompatible(Version left, Version right) {
boolean result = left.isCompatible(right);
assert result == right.isCompatible(left);
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java
index bd1377b89f..ad03d4b001 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java
@@ -122,5 +122,14 @@ public class SettingsUpdaterTests extends ESTestCase {
Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build());
assertEquals(clusterState.blocks().global().size(), 0);
+
+ clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(),
+ Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build());
+ assertEquals(clusterState.blocks().global().size(), 1);
+ assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ clusterState = updater.updateSettings(build, Settings.EMPTY,
+ Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), false).build());
+ assertEquals(clusterState.blocks().global().size(), 0);
+
}
}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
index 5181e943c2..90eb7cdcfd 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
@@ -93,7 +93,7 @@ public class ClusterSearchShardsResponseTests extends ESTestCase {
assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId());
assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards());
}
- if (version.onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (version.onOrAfter(Version.V_5_1_1)) {
assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
} else {
assertNull(deserialized.getIndicesAndFilters());
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
index 5f3e107942..aec8349ea8 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
@@ -51,7 +51,7 @@ public class PutStoredScriptRequestTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] rawStreamBytes = Base64.getDecoder().decode("ADwDCG11c3RhY2hlAQZzY3JpcHQCe30A");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(rawStreamBytes)) {
in.setVersion(version);
PutStoredScriptRequest serialized = new PutStoredScriptRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java
index 95fa5b2600..2aaf2507e3 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java
@@ -28,6 +28,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class PendingTasksBlocksIT extends ESIntegTestCase {
@@ -36,7 +37,8 @@ public class PendingTasksBlocksIT extends ESIntegTestCase {
ensureGreen("test");
// This test checks that the Pending Cluster Tasks operation is never blocked, even if an index is read only or whatever.
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", blockSetting);
PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
index dbc7e5cddc..ee1f4dd24e 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
@@ -28,6 +28,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
@@ -52,7 +53,7 @@ public class ClearIndicesCacheBlocksIT extends ESIntegTestCase {
}
}
// Request is blocked
- for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", blockSetting);
assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true));
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
index 590eba3666..97e1bf2930 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java
@@ -51,8 +51,8 @@ public class CreateIndexRequestTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("ADwDAANmb28APAMBB215X3R5cGULeyJ0eXBlIjp7fX0AAAD////+AA==");
- final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2, Version.V_5_1_1, Version.V_5_1_2,
+ Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
CreateIndexRequest serialized = new CreateIndexRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
index a83c209a3c..63cfc5da43 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java
@@ -19,22 +19,68 @@
package org.elasticsearch.action.admin.indices.delete;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
-@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class DeleteIndexBlocksIT extends ESIntegTestCase {
public void testDeleteIndexWithBlocks() {
createIndex("test");
ensureGreen("test");
-
try {
setClusterReadOnly(true);
- assertBlocked(client().admin().indices().prepareDelete("test"));
+ assertBlocked(client().admin().indices().prepareDelete("test"), MetaData.CLUSTER_READ_ONLY_BLOCK);
} finally {
setClusterReadOnly(false);
}
}
+
+ public void testDeleteIndexOnIndexReadOnlyAllowDeleteSetting() {
+ createIndex("test");
+ ensureGreen("test");
+ client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar").get();
+ refresh();
+ try {
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE, true).build();
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get());
+ assertSearchHits(client().prepareSearch().get(), "1");
+ assertBlocked(client().prepareIndex().setIndex("test").setType("doc").setId("2").setSource("foo", "bar"),
+ IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
+ assertBlocked(client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put("index.number_of_replicas", 2)), IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
+ assertSearchHits(client().prepareSearch().get(), "1");
+ assertAcked(client().admin().indices().prepareDelete("test"));
+ } finally {
+ Settings settings = Settings.builder().putNull(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE).build();
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()).
+ setSettings(settings).get());
+ }
+ }
+
+ public void testDeleteIndexOnReadOnlyAllowDeleteSetting() {
+ createIndex("test");
+ ensureGreen("test");
+ client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar").get();
+ refresh();
+ try {
+ Settings settings = Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+ assertSearchHits(client().prepareSearch().get(), "1");
+ assertBlocked(client().prepareIndex().setIndex("test").setType("doc").setId("2").setSource("foo", "bar"),
+ MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ assertBlocked(client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put("index.number_of_replicas", 2)), MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ assertSearchHits(client().prepareSearch().get(), "1");
+ assertAcked(client().admin().indices().prepareDelete("test"));
+ } finally {
+ Settings settings = Settings.builder().putNull(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey()).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+ }
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
index 3ba349ffca..1ace701572 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java
@@ -28,6 +28,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
@@ -46,7 +47,8 @@ public class FlushBlocksIT extends ESIntegTestCase {
}
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", blockSetting);
FlushResponse response = client().admin().indices().prepareFlush("test").execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
index e1f498b09b..aa6b7c6138 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java
@@ -28,6 +28,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
@@ -59,7 +60,7 @@ public class ForceMergeBlocksIT extends ESIntegTestCase {
}
// Request is blocked
- for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", blockSetting);
assertBlocked(client().admin().indices().prepareForceMerge("test"));
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
index 722482837a..2bd13669fe 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
@@ -38,6 +38,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.hamcrest.Matchers.anyOf;
@@ -178,7 +179,7 @@ public class GetIndexIT extends ESIntegTestCase {
}
public void testGetIndexWithBlocks() {
- for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("idx", block);
GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("idx")
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
index fd7f830e59..2870b04fdb 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
@@ -94,7 +94,7 @@ public class PutMappingRequestTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("ADwDAQNmb28MAA8tLS0KZm9vOiAiYmFyIgoAPAMAAAA=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
PutMappingRequest request = new PutMappingRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
index cc74f7c734..d69f7842bb 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java
@@ -29,7 +29,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
@@ -42,7 +42,8 @@ public class RefreshBlocksIT extends ESIntegTestCase {
NumShards numShards = getNumShards("test");
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", blockSetting);
RefreshResponse response = client().admin().indices().prepareRefresh("test").execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
index 035c760d84..bcf106eda8 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksIT.java
@@ -28,6 +28,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
@@ -44,7 +45,8 @@ public class IndicesSegmentsBlocksIT extends ESIntegTestCase {
client().admin().indices().prepareFlush("test-blocks").get();
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test-blocks", blockSetting);
IndicesSegmentResponse response = client().admin().indices().prepareSegments("test-blocks").execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
index 25fdb7a84d..e7b477f61b 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksIT.java
@@ -29,6 +29,7 @@ import java.util.Arrays;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
@ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class IndicesStatsBlocksIT extends ESIntegTestCase {
@@ -37,7 +38,8 @@ public class IndicesStatsBlocksIT extends ESIntegTestCase {
ensureGreen("ro");
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("ro", blockSetting);
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("ro").execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
index 48b2ae79cf..453efb2a60 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
@@ -89,7 +89,7 @@ public class PutIndexTemplateRequestTests extends ESTestCase {
public void testPutIndexTemplateRequestSerializationXContentBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("ADwDAANmb28IdGVtcGxhdGUAAAAAAAABA2Jhcg8tLS0KZm9vOiAiYmFyIgoAAAAAAAAAAAAAAAA=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
PutIndexTemplateRequest request = new PutIndexTemplateRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java
index 6b68112d5d..8543b35569 100644
--- a/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.fieldcaps;
+import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
@@ -33,10 +34,52 @@ public class FieldCapabilitiesRequestTests extends ESTestCase {
for (int i = 0; i < size; i++) {
randomFields[i] = randomAlphaOfLengthBetween(5, 10);
}
+
+ size = randomIntBetween(0, 20);
+ String[] randomIndices = new String[size];
+ for (int i = 0; i < size; i++) {
+ randomIndices[i] = randomAlphaOfLengthBetween(5, 10);
+ }
request.fields(randomFields);
+ request.indices(randomIndices);
+ if (randomBoolean()) {
+ request.indicesOptions(randomBoolean() ? IndicesOptions.strictExpand() : IndicesOptions.lenientExpandOpen());
+ }
return request;
}
+ public void testEqualsAndHashcode() {
+ FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
+ request.indices("foo");
+ request.indicesOptions(IndicesOptions.lenientExpandOpen());
+ request.fields("bar");
+
+ FieldCapabilitiesRequest other = new FieldCapabilitiesRequest();
+ other.indices("foo");
+ other.indicesOptions(IndicesOptions.lenientExpandOpen());
+ other.fields("bar");
+ assertEquals(request, request);
+ assertEquals(request, other);
+ assertEquals(request.hashCode(), other.hashCode());
+
+ // change indices
+ other.indices("foo", "bar");
+ assertNotEquals(request, other);
+ other.indices("foo");
+ assertEquals(request, other);
+
+ // change fields
+ other.fields("foo", "bar");
+ assertNotEquals(request, other);
+ other.fields("bar");
+ assertEquals(request, request);
+
+ // change indices options
+ other.indicesOptions(IndicesOptions.strictExpand());
+ assertNotEquals(request, other);
+
+ }
+
public void testFieldCapsRequestSerialization() throws IOException {
for (int i = 0; i < 20; i++) {
FieldCapabilitiesRequest request = randomRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java b/core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java
index f1cbaa9df3..309e844505 100644
--- a/core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/fieldstats/FieldStatsRequestTests.java
@@ -113,7 +113,7 @@ public class FieldStatsRequestTests extends ESTestCase {
FieldStatsShardResponse deserialized = new FieldStatsShardResponse();
deserialized.readFrom(input);
final Map<String, FieldStats<?>> expected;
- if (version.before(Version.V_5_2_0_UNRELEASED)) {
+ if (version.before(Version.V_5_2_0)) {
expected = deserialized.filterNullMinMax();
} else {
expected = deserialized.getFieldStats();
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
index 4fb1d0c648..73a44ff145 100644
--- a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -178,7 +178,7 @@ public class IndexRequestTests extends ESTestCase {
public void testIndexRequestXContentSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("AAD////+AgQDZm9vAAAAAQNiYXIBATEAAAAAAnt9AP/////////9AAAA//////////8AAAAAAAA=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
IndexRequest serialized = new IndexRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java b/core/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java
index 01aed87947..37a15c5b35 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/PutPipelineRequestTests.java
@@ -49,7 +49,7 @@ public class PutPipelineRequestTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("ADwDATECe30=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
PutPipelineRequest request = new PutPipelineRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
index e3ca936bb8..ecd0256b11 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestTests.java
@@ -74,7 +74,7 @@ public class SimulatePipelineRequestTests extends ESTestCase {
public void testSerializationWithXContentBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("AAAAAnt9AAA=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
SimulatePipelineRequest request = new SimulatePipelineRequest();
diff --git a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
index 3407007d64..92e093350a 100644
--- a/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/main/MainActionTests.java
@@ -108,13 +108,13 @@ public class MainActionTests extends ESTestCase {
} else {
blocks = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(randomIntBetween(1, 16), "test global block 400", randomBoolean(), randomBoolean(),
- RestStatus.BAD_REQUEST, ClusterBlockLevel.ALL))
+ false, RestStatus.BAD_REQUEST, ClusterBlockLevel.ALL))
.build();
}
} else {
blocks = ClusterBlocks.builder()
.addGlobalBlock(new ClusterBlock(randomIntBetween(1, 16), "test global block 503", randomBoolean(), randomBoolean(),
- RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL))
+ false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL))
.build();
}
ClusterState state = ClusterState.builder(clusterName).blocks(blocks).build();
diff --git a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
index b7f0e0785f..255025302c 100644
--- a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
+++ b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java
@@ -196,4 +196,35 @@ public class ExpandSearchPhaseTests extends ESTestCase {
assertNotNull(reference.get());
assertEquals(1, mockSearchPhaseContext.phasesExecuted.get());
}
+
+ public void testSkipExpandCollapseNoHits() throws IOException {
+ MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
+ mockSearchPhaseContext.searchTransport = new SearchTransportService(
+ Settings.builder().put("search.remote.connect", false).build(), null) {
+
+ @Override
+ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener<MultiSearchResponse> listener) {
+ fail("expand should not try to send empty multi search request");
+ }
+ };
+ mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder()
+ .collapse(new CollapseBuilder("someField").setInnerHits(new InnerHitBuilder().setName("foobarbaz"))));
+
+ SearchHits hits = new SearchHits(new SearchHit[0], 1, 1.0f);
+ InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1);
+ SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null);
+ AtomicReference<SearchResponse> reference = new AtomicReference<>();
+ ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, response, r ->
+ new SearchPhase("test") {
+ @Override
+ public void run() throws IOException {
+ reference.set(r);
+ }
+ }
+ );
+ phase.run();
+ mockSearchPhaseContext.assertNoFailure();
+ assertNotNull(reference.get());
+ assertEquals(1, mockSearchPhaseContext.phasesExecuted.get());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
index 93d8be990d..7e04e99b17 100644
--- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
@@ -262,7 +262,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlocks.Builder block = ClusterBlocks.builder()
- .addGlobalBlock(new ClusterBlock(1, "test-block", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ .addGlobalBlock(new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
try {
action.new AsyncAction(null, request, listener).start();
@@ -277,7 +277,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlocks.Builder block = ClusterBlocks.builder()
- .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ .addIndexBlock(TEST_INDEX, new ClusterBlock(1, "test-block", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
try {
action.new AsyncAction(null, request, listener).start();
diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
index ae543aa14c..f8975a5369 100644
--- a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java
@@ -205,7 +205,7 @@ public class TransportMasterNodeActionTests extends ESTestCase {
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlock block = new ClusterBlock(1, "", retryableBlock, true,
- randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
+ false, randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.builder().addGlobalBlock(block)).build();
setState(clusterService, stateWithBlock);
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index db8855aadd..b402feb6d8 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -194,8 +194,8 @@ public class TransportReplicationActionTests extends ESTestCase {
}
};
- ClusterBlocks.Builder block = ClusterBlocks.builder()
- .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ ClusterBlocks.Builder block = ClusterBlocks.builder().addGlobalBlock(new ClusterBlock(1, "non retryable", false, true,
+ false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
TestAction.ReroutePhase reroutePhase = action.new ReroutePhase(task, request, listener);
reroutePhase.run();
@@ -203,7 +203,7 @@ public class TransportReplicationActionTests extends ESTestCase {
assertPhase(task, "failed");
block = ClusterBlocks.builder()
- .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(task, new Request().timeout("5ms"), listener);
@@ -219,8 +219,8 @@ public class TransportReplicationActionTests extends ESTestCase {
assertPhase(task, "waiting_for_retry");
assertTrue(request.isRetrySet.get());
- block = ClusterBlocks.builder()
- .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ block = ClusterBlocks.builder().addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, false,
+ RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener,
ClusterBlockException.class);
diff --git a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
index c83a76ddc1..ba488cecb3 100644
--- a/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
@@ -176,7 +176,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlocks.Builder block = ClusterBlocks.builder()
- .addGlobalBlock(new ClusterBlock(1, "", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ .addGlobalBlock(new ClusterBlock(1, "", false, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
setState(clusterService, ClusterState.builder(clusterService.state()).blocks(block));
try {
action.new AsyncSingleAction(request, listener).start();
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
index 05e30d7e2d..15a2f9e74a 100644
--- a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java
@@ -66,7 +66,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
import static org.hamcrest.Matchers.equalTo;
public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase {
-
protected static class TestFieldSetting {
public final String name;
public final boolean storedOffset;
@@ -211,7 +210,7 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase {
Settings.Builder settings = Settings.builder()
.put(indexSettings())
.put("index.analysis.analyzer.tv_test.tokenizer", "standard")
- .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase");
+ .putArray("index.analysis.analyzer.tv_test.filter", "lowercase");
assertAcked(prepareCreate(index).addMapping("type1", mappingBuilder).setSettings(settings).addAlias(new Alias(alias)));
}
@@ -395,11 +394,7 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase {
assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.startOffset(), equalTo(-1));
assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.endOffset(), equalTo(-1));
}
- if (field.storedPayloads && testConfig.requestPayloads) {
- assertThat("Payload test failed" + failDesc, luceneDocsPosEnum.getPayload(), equalTo(esDocsPosEnum.getPayload()));
- } else {
- assertThat("Missing payload test failed" + failDesc, esDocsPosEnum.getPayload(), equalTo(null));
- }
+ assertNull("Missing payload test failed" + failDesc, esDocsPosEnum.getPayload());
}
}
assertNull("Es returned terms are done but lucene isn't", luceneTermEnum.next());
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java
deleted file mode 100644
index 294a0ffde8..0000000000
--- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.termvectors;
-
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.hamcrest.Matchers;
-
-import java.io.IOException;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.hamcrest.Matchers.equalTo;
-
-public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
-
- @Override
- protected int numberOfShards() {
- return 1;
- }
-
- @Override
- protected int numberOfReplicas() {
- return 0;
- }
-
- @Override
- public Settings indexSettings() {
- return Settings.builder()
- .put(super.indexSettings())
- .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")
- .build();
- }
-
- public void testSimpleTermVectors() throws IOException {
- XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
- .startObject("properties")
- .startObject("field")
- .field("type", "text")
- .field("term_vector", "with_positions_offsets_payloads")
- .field("analyzer", "tv_test")
- .endObject()
- .endObject()
- .endObject().endObject();
- assertAcked(prepareCreate("test").addMapping("type1", mapping));
- ensureGreen();
- int numDocs = 15;
- for (int i = 0; i < numDocs; i++) {
- client().prepareIndex("test", "type1", Integer.toString(i))
- .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
- // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
- // 31the34 35lazy39 40dog43
- .endObject()).execute().actionGet();
- refresh();
- }
- String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" };
- int[] freq = { 1, 1, 1, 1, 1, 1, 1, 2 };
- int[][] pos = { { 2 }, { 8 }, { 3 }, { 4 }, { 7 }, { 5 }, { 1 }, { 0, 6 } };
- int[][] startOffset = { { 10 }, { 40 }, { 16 }, { 20 }, { 35 }, { 26 }, { 4 }, { 0, 31 } };
- int[][] endOffset = { { 15 }, { 43 }, { 19 }, { 25 }, { 39 }, { 30 }, { 9 }, { 3, 34 } };
- for (int i = 0; i < numDocs; i++) {
- checkAllInfo(numDocs, values, freq, pos, startOffset, endOffset, i);
- checkWithoutTermStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
- checkWithoutFieldStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
- }
- }
-
- private void checkWithoutFieldStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
- int i) throws IOException {
- TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
- .setPositions(true).setTermStatistics(true).setFieldStatistics(false).setSelectedFields();
- TermVectorsResponse response = resp.execute().actionGet();
- assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
- Fields fields = response.getFields();
- assertThat(fields.size(), equalTo(1));
- Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8L));
- assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
- assertThat(terms.getDocCount(), Matchers.equalTo(-1));
- assertThat(terms.getSumDocFreq(), equalTo((long) -1));
- TermsEnum iterator = terms.iterator();
- for (int j = 0; j < values.length; j++) {
- String string = values[j];
- BytesRef next = iterator.next();
- assertThat(next, Matchers.notNullValue());
- assertThat("expected " + string, string, equalTo(next.utf8ToString()));
- assertThat(next, Matchers.notNullValue());
- if (string.equals("the")) {
- assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
- } else {
- assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
- }
-
- PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL);
- assertThat(docsAndPositions.nextDoc(), equalTo(0));
- assertThat(freq[j], equalTo(docsAndPositions.freq()));
- assertThat(iterator.docFreq(), equalTo(numDocs));
- int[] termPos = pos[j];
- int[] termStartOffset = startOffset[j];
- int[] termEndOffset = endOffset[j];
- assertThat(termPos.length, equalTo(freq[j]));
- assertThat(termStartOffset.length, equalTo(freq[j]));
- assertThat(termEndOffset.length, equalTo(freq[j]));
- for (int k = 0; k < freq[j]; k++) {
- int nextPosition = docsAndPositions.nextPosition();
- assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
- assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
- assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
- assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
- }
- }
- assertThat(iterator.next(), Matchers.nullValue());
-
- XContentBuilder xBuilder = XContentFactory.jsonBuilder();
- response.toXContent(xBuilder, null);
- String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");;
- String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
- + i
- + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
- assertThat(utf8, equalTo(expectedString));
-
- }
-
- private void checkWithoutTermStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
- int i) throws IOException {
- TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
- .setPositions(true).setTermStatistics(false).setFieldStatistics(true).setSelectedFields();
- assertThat(resp.request().termStatistics(), equalTo(false));
- TermVectorsResponse response = resp.execute().actionGet();
- assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
- Fields fields = response.getFields();
- assertThat(fields.size(), equalTo(1));
- Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8L));
- assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
- assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
- assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
- TermsEnum iterator = terms.iterator();
- for (int j = 0; j < values.length; j++) {
- String string = values[j];
- BytesRef next = iterator.next();
- assertThat(next, Matchers.notNullValue());
- assertThat("expected " + string, string, equalTo(next.utf8ToString()));
- assertThat(next, Matchers.notNullValue());
-
- assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq()));
-
- PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL);
- assertThat(docsAndPositions.nextDoc(), equalTo(0));
- assertThat(freq[j], equalTo(docsAndPositions.freq()));
- assertThat(iterator.docFreq(), equalTo(-1));
- int[] termPos = pos[j];
- int[] termStartOffset = startOffset[j];
- int[] termEndOffset = endOffset[j];
- assertThat(termPos.length, equalTo(freq[j]));
- assertThat(termStartOffset.length, equalTo(freq[j]));
- assertThat(termEndOffset.length, equalTo(freq[j]));
- for (int k = 0; k < freq[j]; k++) {
- int nextPosition = docsAndPositions.nextPosition();
- assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
- assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
- assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
- assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
- }
- }
- assertThat(iterator.next(), Matchers.nullValue());
-
- XContentBuilder xBuilder = XContentFactory.jsonBuilder();
- response.toXContent(xBuilder, null);
- String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");;
- String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
- + i
- + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
- assertThat(utf8, equalTo(expectedString));
-
- }
-
- private void checkAllInfo(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset, int i)
- throws IOException {
- TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
- .setPositions(true).setFieldStatistics(true).setTermStatistics(true).setSelectedFields();
- assertThat(resp.request().fieldStatistics(), equalTo(true));
- TermVectorsResponse response = resp.execute().actionGet();
- assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
- Fields fields = response.getFields();
- assertThat(fields.size(), equalTo(1));
- Terms terms = fields.terms("field");
- assertThat(terms.size(), equalTo(8L));
- assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
- assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
- assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
- TermsEnum iterator = terms.iterator();
- for (int j = 0; j < values.length; j++) {
- String string = values[j];
- BytesRef next = iterator.next();
- assertThat(next, Matchers.notNullValue());
- assertThat("expected " + string, string, equalTo(next.utf8ToString()));
- assertThat(next, Matchers.notNullValue());
- if (string.equals("the")) {
- assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
- } else {
- assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
- }
-
- PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL);
- assertThat(docsAndPositions.nextDoc(), equalTo(0));
- assertThat(freq[j], equalTo(docsAndPositions.freq()));
- assertThat(iterator.docFreq(), equalTo(numDocs));
- int[] termPos = pos[j];
- int[] termStartOffset = startOffset[j];
- int[] termEndOffset = endOffset[j];
- assertThat(termPos.length, equalTo(freq[j]));
- assertThat(termStartOffset.length, equalTo(freq[j]));
- assertThat(termEndOffset.length, equalTo(freq[j]));
- for (int k = 0; k < freq[j]; k++) {
- int nextPosition = docsAndPositions.nextPosition();
- assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
- assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
- assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
- assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
- }
- }
- assertThat(iterator.next(), Matchers.nullValue());
-
- XContentBuilder xBuilder = XContentFactory.jsonBuilder();
- response.toXContent(xBuilder, ToXContent.EMPTY_PARAMS);
- String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");;
- String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
- + i
- + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
- assertThat(utf8, equalTo(expectedString));
- }
-
-}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
index ba2f5de24b..cc7a73278e 100644
--- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
@@ -193,7 +193,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
.setSettings(Settings.builder()
.put(indexSettings())
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ .putArray("index.analysis.analyzer.tv_test.filter", "lowercase")));
for (int i = 0; i < 10; i++) {
client().prepareIndex("test", "type1", Integer.toString(i))
.setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
@@ -216,10 +216,9 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
public void testRandomSingleTermVectors() throws IOException {
FieldType ft = new FieldType();
- int config = randomInt(6);
+ int config = randomInt(4);
boolean storePositions = false;
boolean storeOffsets = false;
- boolean storePayloads = false;
boolean storeTermVectors = false;
switch (config) {
case 0: {
@@ -246,23 +245,11 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
storeOffsets = true;
break;
}
- case 5: {
- storeTermVectors = true;
- storePositions = true;
- storePayloads = true;
- break;
- }
- case 6: {
- storeTermVectors = true;
- storePositions = true;
- storeOffsets = true;
- storePayloads = true;
- break;
- }
+ default:
+ throw new IllegalArgumentException("Unsupported option: " + config);
}
ft.setStoreTermVectors(storeTermVectors);
ft.setStoreTermVectorOffsets(storeOffsets);
- ft.setStoreTermVectorPayloads(storePayloads);
ft.setStoreTermVectorPositions(storePositions);
String optionString = FieldMapper.termVectorOptionsToString(ft);
@@ -278,7 +265,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertAcked(prepareCreate("test").addMapping("type1", mapping)
.setSettings(Settings.builder()
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ .putArray("index.analysis.analyzer.tv_test.filter", "lowercase")));
for (int i = 0; i < 10; i++) {
client().prepareIndex("test", "type1", Integer.toString(i))
.setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
@@ -293,13 +280,12 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
- boolean isPayloadRequested = randomBoolean();
boolean isOffsetRequested = randomBoolean();
boolean isPositionsRequested = randomBoolean();
- String infoString = createInfoString(isPositionsRequested, isOffsetRequested, isPayloadRequested, optionString);
+ String infoString = createInfoString(isPositionsRequested, isOffsetRequested, optionString);
for (int i = 0; i < 10; i++) {
TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i))
- .setPayloads(isPayloadRequested).setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
+ .setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
TermVectorsResponse response = resp.execute().actionGet();
assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
Fields fields = response.getFields();
@@ -340,13 +326,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
} else {
assertThat(infoString + "positions for term: ", nextPosition, equalTo(-1));
}
- // only return something useful if requested and stored
- if (isPayloadRequested && storePayloads) {
- assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef(
- "word")));
- } else {
- assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(null));
- }
+ // payloads are never made by the mapping in this test
+ assertNull(infoString + "payloads for term: " + string, docsAndPositions.getPayload());
// only return something useful if requested and stored
if (isOffsetRequested && storeOffsets) {
@@ -365,11 +346,9 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
}
}
- private String createInfoString(boolean isPositionsRequested, boolean isOffsetRequested, boolean isPayloadRequested,
- String optionString) {
+ private String createInfoString(boolean isPositionsRequested, boolean isOffsetRequested, String optionString) {
String ret = "Store config: " + optionString + "\n" + "Requested: pos-"
- + (isPositionsRequested ? "yes" : "no") + ", offsets-" + (isOffsetRequested ? "yes" : "no") + ", payload- "
- + (isPayloadRequested ? "yes" : "no") + "\n";
+ + (isPositionsRequested ? "yes" : "no") + ", offsets-" + (isOffsetRequested ? "yes" : "no") + "\n";
return ret;
}
@@ -585,7 +564,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
.setSettings(Settings.builder()
.put(indexSettings())
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+ .putArray("index.analysis.analyzer.tv_test.filter", "lowercase")));
ensureGreen();
@@ -645,9 +624,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
- if (withPayloads) {
- assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
- }
+ // We never configure an analyzer with payloads for this test so this is never returned
+ assertNull("term: " + string, docsAndPositions.getPayload());
}
}
assertThat(iterator.next(), nullValue());
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
index e034cff3f1..2018218cc5 100644
--- a/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
@@ -269,7 +269,7 @@ public class TermVectorsUnitTests extends ESTestCase {
public void testStreamRequestWithXContentBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("AAABBWluZGV4BHR5cGUCaWQBAnt9AAABDnNvbWVQcmVmZXJlbmNlFgAAAAEA//////////0AAAA=");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
TermVectorsRequest request = new TermVectorsRequest();
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
index f9f4a136e1..b6b6b3024b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java
@@ -59,6 +59,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
@@ -327,7 +328,8 @@ public class ClusterRerouteIT extends ESIntegTestCase {
int toggle = nodesIds.indexOf(node.getName());
// Rerouting shards is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test-blocks", blockSetting);
assertAcked(client().admin().cluster().prepareReroute()
diff --git a/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java
index a7fe1b918c..a84d160cf0 100644
--- a/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java
@@ -49,7 +49,7 @@ public class ClusterBlockTests extends ESTestCase {
}
ClusterBlock clusterBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(),
- randomBoolean(), randomFrom(RestStatus.values()), levels);
+ randomBoolean(), false, randomFrom(RestStatus.values()), levels);
BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(version);
@@ -75,7 +75,7 @@ public class ClusterBlockTests extends ESTestCase {
levels.add(randomFrom(ClusterBlockLevel.values()));
}
ClusterBlock clusterBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(),
- randomBoolean(), randomFrom(RestStatus.values()), levels);
+ randomBoolean(), false, randomFrom(RestStatus.values()), levels);
assertThat(clusterBlock.toString(), not(endsWith(",")));
}
@@ -86,7 +86,7 @@ public class ClusterBlockTests extends ESTestCase {
levels.add(randomFrom(ClusterBlockLevel.values()));
}
ClusterBlock globalBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(),
- randomBoolean(), randomFrom(RestStatus.values()), levels);
+ randomBoolean(), false, randomFrom(RestStatus.values()), levels);
ClusterBlocks clusterBlocks = new ClusterBlocks(Collections.singleton(globalBlock), ImmutableOpenMap.of());
ClusterBlockException exception = clusterBlocks.indicesBlockedException(randomFrom(globalBlock.levels()), new String[0]);
assertNotNull(exception);
diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java
index 548f9d407c..91b7a18236 100644
--- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java
@@ -76,8 +76,10 @@ public class DiscoveryNodeTests extends ESTestCase {
assertEquals(transportAddress.getAddress(), serialized.getHostAddress());
assertEquals(transportAddress.getAddress(), serialized.getAddress().getAddress());
assertEquals(transportAddress.getPort(), serialized.getAddress().getPort());
- assertFalse("if the minimum compatibility version moves past 5.0.3, remove the special casing in DiscoverNode(StreamInput) and " +
- "the TransportAddress(StreamInput, String) constructor",
- Version.CURRENT.minimumCompatibilityVersion().onOrAfter(Version.V_5_0_3_UNRELEASED));
+ assertFalse("if the minimum index compatibility version moves past 5.0.3, remove the special casing in DiscoverNode(StreamInput)" +
+ " and the TransportAddress(StreamInput, String) constructor",
+ Version.CURRENT.minimumIndexCompatibilityVersion().after(Version.V_5_0_2));
+ // serialization can happen from an old cluster-state in a full cluster restart
+ // hence we need to maintain this until we drop index bwc
}
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
index 4e77741694..3b2fb365ca 100644
--- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -139,7 +139,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
// serialize with old version
outStream = new BytesStreamOutput();
- outStream.setVersion(Version.CURRENT.minimumCompatibilityVersion());
+ outStream.setVersion(Version.CURRENT.minimumIndexCompatibilityVersion());
diffs.writeTo(outStream);
inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
index bab53b8f35..61e31666f3 100644
--- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java
@@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResp
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.discovery.Discovery;
@@ -33,6 +34,7 @@ import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.junit.After;
import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -42,8 +44,15 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
-@ClusterScope(scope = TEST)
public class ClusterSettingsIT extends ESIntegTestCase {
+
+ @After
+ public void cleanup() throws Exception {
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(Settings.builder().putNull("*"))
+ .setTransientSettings(Settings.builder().putNull("*")));
+ }
+
public void testClusterNonExistingSettingsUpdate() {
String key1 = "no_idea_what_you_are_talking_about";
int value1 = 10;
@@ -302,13 +311,25 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK);
// But it's possible to update the settings to update the "cluster.blocks.read_only" setting
- Settings settings = Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build();
+ Settings settings = Settings.builder().putNull(MetaData.SETTING_READ_ONLY_SETTING.getKey()).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
} finally {
setClusterReadOnly(false);
}
+ // Cluster settings updates are also blocked when the cluster is in read-only-allow-delete mode
+ try {
+ // Enable the "cluster.blocks.read_only_allow_delete" block and verify that requests are blocked
+ Settings settings = Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+ assertBlocked(request, MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+ } finally {
+ // It's still possible to reset the "cluster.blocks.read_only_allow_delete" setting while blocked
+ Settings s = Settings.builder().putNull(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey()).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(s).get());
+ }
+
// It should work now
ClusterUpdateSettingsResponse response = request.execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
index e648dce6a6..6e1475d0ae 100644
--- a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsIT.java
@@ -33,6 +33,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.hamcrest.Matchers.equalTo;
@@ -136,7 +137,8 @@ public class ClusterSearchShardsIT extends ESIntegTestCase {
ensureGreen("test-blocks");
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test-blocks", blockSetting);
ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test-blocks").execute().actionGet();
diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
index b67000e2b2..34a48862e1 100644
--- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
@@ -805,7 +805,7 @@ public class BytesStreamsTests extends ESTestCase {
}
assertTrue("If we're not compatible with 5.1.1 we can drop the assertion below",
- Version.CURRENT.minimumCompatibilityVersion().onOrBefore(Version.V_5_1_1_UNRELEASED));
+ Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_5_1_1));
/* Read -1 as serialized by a version of Elasticsearch that supported writing negative numbers with writeVLong. Note that this
* should be the same test as the first case (when value is negative) but we've kept some bytes so no matter what we do to
* writeVLong in the future we can be sure we can read bytes as written by Elasticsearch before 5.1.2 */
diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
index 01ace21ad1..dd96acdd6c 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
@@ -103,6 +103,30 @@ public class ScopedSettingsTests extends ESTestCase {
assertNull(target.build().getAsInt("archived.foo.bar", null));
}
+ public void testResetSettingWithIPValidator() {
+ Settings currentSettings = Settings.builder().put("index.routing.allocation.require._ip", "192.168.0.1,127.0.0.1")
+ .put("index.some.dyn.setting", 1)
+ .build();
+ Setting<Integer> dynamicSetting = Setting.intSetting("index.some.dyn.setting", 1, Property.Dynamic, Property.IndexScope);
+
+ IndexScopedSettings settings = new IndexScopedSettings(currentSettings,
+ new HashSet<>(Arrays.asList(dynamicSetting, IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING)));
+ Settings s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(currentSettings);
+ assertEquals(1, s.size());
+ assertEquals("192.168.0.1,127.0.0.1", s.get("_ip"));
+ Settings.Builder builder = Settings.builder();
+ Settings updates = Settings.builder().putNull("index.routing.allocation.require._ip")
+ .put("index.some.dyn.setting", 1).build();
+ settings.validate(updates);
+ settings.updateDynamicSettings(updates,
+ Settings.builder().put(currentSettings), builder, "node");
+ currentSettings = builder.build();
+ s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(currentSettings);
+ assertEquals(0, s.size());
+ assertEquals(1, dynamicSetting.get(currentSettings).intValue());
+ assertEquals(1, currentSettings.size());
+ }
+
public void testAddConsumer() {
Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope);
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java
new file mode 100644
index 0000000000..732ec94ae1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ * Tests for the automatic queue resizing of the {@code QueueResizingEsThreadPoolExecutor}
+ * based on the time taken for each event.
+ */
+public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase {
+
+ public void testExactWindowSizeAdjustment() throws Exception {
+ ThreadContext context = new ThreadContext(Settings.EMPTY);
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), 100);
+
+ int threads = randomIntBetween(1, 3);
+ int measureWindow = 3;
+ logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
+ QueueResizingEsThreadPoolExecutor executor =
+ new QueueResizingEsThreadPoolExecutor(
+ "test-threadpool", threads, threads, 1000,
+ TimeUnit.MILLISECONDS, queue, 10, 1000, fastWrapper(),
+ measureWindow, TimeValue.timeValueMillis(1), EsExecutors.daemonThreadFactory("queuetest"),
+ new EsAbortPolicy(), context);
+ executor.prestartAllCoreThreads();
+ logger.info("--> executor: {}", executor);
+
+ // Execute exactly 3 (measureWindow) times
+ executor.execute(() -> {});
+ executor.execute(() -> {});
+ executor.execute(() -> {});
+
+ // The queue capacity should have increased by 50 since they were very fast tasks
+ assertBusy(() -> {
+ assertThat(queue.capacity(), equalTo(150));
+ });
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ context.close();
+ }
+
+ public void testAutoQueueSizingUp() throws Exception {
+ ThreadContext context = new ThreadContext(Settings.EMPTY);
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
+ 2000);
+
+ int threads = randomIntBetween(1, 10);
+ int measureWindow = randomIntBetween(100, 200);
+ logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
+ QueueResizingEsThreadPoolExecutor executor =
+ new QueueResizingEsThreadPoolExecutor(
+ "test-threadpool", threads, threads, 1000,
+ TimeUnit.MILLISECONDS, queue, 10, 3000, fastWrapper(),
+ measureWindow, TimeValue.timeValueMillis(1), EsExecutors.daemonThreadFactory("queuetest"),
+ new EsAbortPolicy(), context);
+ executor.prestartAllCoreThreads();
+ logger.info("--> executor: {}", executor);
+
+ // Execute a task multiple times; fastWrapper() makes each report a very fast (50ns) runtime
+ executeTask(executor, (measureWindow * 5) + 2);
+
+ assertBusy(() -> {
+ assertThat(queue.capacity(), greaterThan(2000));
+ });
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ context.close();
+ }
+
+ public void testAutoQueueSizingDown() throws Exception {
+ ThreadContext context = new ThreadContext(Settings.EMPTY);
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
+ 2000);
+
+ int threads = randomIntBetween(1, 10);
+ int measureWindow = randomIntBetween(100, 200);
+ logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
+ QueueResizingEsThreadPoolExecutor executor =
+ new QueueResizingEsThreadPoolExecutor(
+ "test-threadpool", threads, threads, 1000,
+ TimeUnit.MILLISECONDS, queue, 10, 3000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
+ EsExecutors.daemonThreadFactory("queuetest"), new EsAbortPolicy(), context);
+ executor.prestartAllCoreThreads();
+ logger.info("--> executor: {}", executor);
+
+ // Execute a task multiple times; slowWrapper() makes each report a long (2 minute) runtime
+ executeTask(executor, (measureWindow * 5) + 2);
+
+ assertBusy(() -> {
+ assertThat(queue.capacity(), lessThan(2000));
+ });
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ context.close();
+ }
+
+ public void testAutoQueueSizingWithMin() throws Exception {
+ ThreadContext context = new ThreadContext(Settings.EMPTY);
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
+ 5000);
+
+ int threads = randomIntBetween(1, 5);
+ int measureWindow = randomIntBetween(10, 100);
+ int min = randomIntBetween(4981, 4999);
+ logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
+ QueueResizingEsThreadPoolExecutor executor =
+ new QueueResizingEsThreadPoolExecutor(
+ "test-threadpool", threads, threads, 1000,
+ TimeUnit.MILLISECONDS, queue, min, 100000, slowWrapper(), measureWindow, TimeValue.timeValueMillis(1),
+ EsExecutors.daemonThreadFactory("queuetest"), new EsAbortPolicy(), context);
+ executor.prestartAllCoreThreads();
+ logger.info("--> executor: {}", executor);
+
+ // Execute a task multiple times; slowWrapper() makes each report a long (2 minute) runtime
+ executeTask(executor, (measureWindow * 5));
+
+ // The queue capacity should decrease, but no lower than the minimum
+ assertBusy(() -> {
+ assertThat(queue.capacity(), equalTo(min));
+ });
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ context.close();
+ }
+
+ public void testAutoQueueSizingWithMax() throws Exception {
+ ThreadContext context = new ThreadContext(Settings.EMPTY);
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
+ 5000);
+
+ int threads = randomIntBetween(1, 5);
+ int measureWindow = randomIntBetween(10, 100);
+ int max = randomIntBetween(5010, 5024);
+ logger.info("--> auto-queue with a measurement window of {} tasks", measureWindow);
+ QueueResizingEsThreadPoolExecutor executor =
+ new QueueResizingEsThreadPoolExecutor(
+ "test-threadpool", threads, threads, 1000,
+ TimeUnit.MILLISECONDS, queue, 10, max, fastWrapper(), measureWindow, TimeValue.timeValueMillis(1),
+ EsExecutors.daemonThreadFactory("queuetest"), new EsAbortPolicy(), context);
+ executor.prestartAllCoreThreads();
+ logger.info("--> executor: {}", executor);
+
+ // Execute a task multiple times; fastWrapper() makes each report a very fast (50ns) runtime
+ executeTask(executor, measureWindow * 3);
+
+ // The queue capacity should increase, but no higher than the maximum
+ assertBusy(() -> {
+ assertThat(queue.capacity(), equalTo(max));
+ });
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ context.close();
+ }
+
+ private Function<Runnable, Runnable> randomBetweenLimitsWrapper(final int minNs, final int maxNs) {
+ return (runnable) -> {
+ return new SettableTimedRunnable(randomIntBetween(minNs, maxNs));
+ };
+ }
+
+ private Function<Runnable, Runnable> fastWrapper() {
+ return (runnable) -> {
+ return new SettableTimedRunnable(TimeUnit.NANOSECONDS.toNanos(50));
+ };
+ }
+
+ private Function<Runnable, Runnable> slowWrapper() {
+ return (runnable) -> {
+ return new SettableTimedRunnable(TimeUnit.MINUTES.toNanos(2));
+ };
+ }
+
+ /** Execute a blank task {@code times} times for the executor */
+ private void executeTask(QueueResizingEsThreadPoolExecutor executor, int times) {
+ logger.info("--> executing a task [{}] times", times);
+ for (int i = 0; i < times; i++) {
+ executor.execute(() -> {});
+ }
+ }
+
+ public class SettableTimedRunnable extends TimedRunnable {
+ private final long timeTaken;
+
+ public SettableTimedRunnable(long timeTaken) {
+ super(() -> {});
+ this.timeTaken = timeTaken;
+ }
+
+ @Override
+ public long getTotalNanos() {
+ return timeTaken;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueueTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueueTests.java
new file mode 100644
index 0000000000..b1d5b9bc1b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/ResizableBlockingQueueTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ResizableBlockingQueueTests extends ESTestCase {
+
+ public void testAdjustCapacity() throws Exception {
+ ResizableBlockingQueue<Runnable> queue =
+ new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(),
+ 100);
+
+ assertThat(queue.capacity(), equalTo(100));
+ // Queue size already equal to desired capacity
+ queue.adjustCapacity(100, 25, 1, 1000);
+ assertThat(queue.capacity(), equalTo(100));
+ // Not worth adjusting
+ queue.adjustCapacity(99, 25, 1, 1000);
+ assertThat(queue.capacity(), equalTo(100));
+ // Not worth adjusting
+ queue.adjustCapacity(75, 25, 1, 1000);
+ assertThat(queue.capacity(), equalTo(100));
+ queue.adjustCapacity(74, 25, 1, 1000);
+ assertThat(queue.capacity(), equalTo(75));
+ queue.adjustCapacity(1000000, 25, 1, 1000);
+ assertThat(queue.capacity(), equalTo(100));
+ queue.adjustCapacity(1, 25, 80, 1000);
+ assertThat(queue.capacity(), equalTo(80));
+ queue.adjustCapacity(1000000, 25, 80, 100);
+ assertThat(queue.capacity(), equalTo(100));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index b961b6d6fb..65856add56 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -361,8 +361,8 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
IllegalStateException ex = expectThrows(IllegalStateException.class, () ->
request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), null));
assertEquals("index [test] version not supported: "
- + VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion())
- + " minimum compatible index version is: " + Version.CURRENT.minimumCompatibilityVersion(), ex.getMessage());
+ + VersionUtils.getPreviousVersion(Version.CURRENT.minimumIndexCompatibilityVersion())
+ + " minimum compatible index version is: " + Version.CURRENT.minimumIndexCompatibilityVersion(), ex.getMessage());
} else {
AtomicBoolean sendResponse = new AtomicBoolean(false);
request.messageReceived(new MembershipAction.ValidateJoinRequest(stateBuilder.build()), new TransportChannel() {
diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
index ee1f654642..2fa56fa34a 100644
--- a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
+++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
@@ -606,7 +606,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
public void testSerialization() throws IOException {
for (Version version : new Version[] {Version.CURRENT, Version.V_5_0_1}){
for (int i = 0; i < 20; i++) {
- assertSerialization(randomFieldStats(version.onOrAfter(Version.V_5_2_0_UNRELEASED)), version);
+ assertSerialization(randomFieldStats(version.onOrAfter(Version.V_5_2_0)), version);
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java
index af3fdf9adb..74ec1cc02d 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java
@@ -152,7 +152,7 @@ public class IndexSortSettingsTests extends ESTestCase {
.put("index.sort.field", "field1")
.build();
IllegalArgumentException exc =
- expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0_UNRELEASED));
+ expectThrows(IllegalArgumentException.class, () -> indexSettings(settings, Version.V_5_4_0));
assertThat(exc.getMessage(),
containsString("unsupported index.version.created:5.4.0, " +
"can't set index.sort on versions prior to 6.0.0-alpha1"));
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
index 214515d170..b20972aded 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -63,8 +63,8 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {
assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT),
PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT));
// same lucene version should be cached
- assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_2_UNRELEASED),
- PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_3_UNRELEASED));
+ assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_1),
+ PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_2));
assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_0),
PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_1));
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 7dc94d972c..e72f68e1d2 100644
--- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -1486,7 +1486,7 @@ public class InternalEngineTests extends ESTestCase {
IndexSettings oldSettings = IndexSettingsModule.newIndexSettings("testOld", Settings.builder()
.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
.put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName)
- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_4_0_UNRELEASED)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_4_0)
.put(MapperService.INDEX_MAPPING_SINGLE_TYPE_SETTING.getKey(), true)
.put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(),
between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY)))
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java
index b42bda0a5a..72b1c95d8b 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperTests.java
@@ -56,7 +56,7 @@ public class MapperTests extends ESTestCase {
"As a replacement, you can use an [copy_to] on mapping fields to create your own catch all field.",
e.getMessage());
- settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_3_0_UNRELEASED).build();
+ settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_3_0).build();
// Create the mapping service with an older index creation version
final MapperService oldMapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), settings, "test");
diff --git a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
index d19e8e32ff..ec34f6d87e 100644
--- a/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java
@@ -336,7 +336,7 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
public void testItemSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("AQVpbmRleAEEdHlwZQEODXsiZm9vIjoiYmFyIn0A/wD//////////QAAAAAAAAAA");
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
try (StreamInput in = StreamInput.wrap(data)) {
in.setVersion(version);
Item item = new Item(in);
diff --git a/core/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java b/core/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java
index 91ac42628e..27221b0af9 100644
--- a/core/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java
+++ b/core/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java
@@ -34,15 +34,4 @@ public class RefreshStatsTests extends AbstractStreamableTestCase<RefreshStats>
protected RefreshStats createBlankInstance() {
return new RefreshStats();
}
-
- public void testPre5Dot2() throws IOException {
- // We can drop the compatibility once the assertion just below this list fails
- assertTrue(Version.CURRENT.minimumCompatibilityVersion().before(Version.V_5_2_0_UNRELEASED));
-
- RefreshStats instance = createTestInstance();
- RefreshStats copied = copyInstance(instance, Version.V_5_1_1_UNRELEASED);
- assertEquals(instance.getTotal(), copied.getTotal());
- assertEquals(instance.getTotalTimeInMillis(), copied.getTotalTimeInMillis());
- assertEquals(0, copied.getListeners());
- }
}
diff --git a/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java
index 982198c8fe..e2bf25ce1b 100644
--- a/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java
+++ b/core/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java
@@ -75,7 +75,7 @@ public class BulkByScrollTaskStatusTests extends ESTestCase {
assertEquals(expected.getRequestsPerSecond(), actual.getRequestsPerSecond(), 0f);
assertEquals(expected.getReasonCancelled(), actual.getReasonCancelled());
assertEquals(expected.getThrottledUntil(), actual.getThrottledUntil());
- if (version.onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+ if (version.onOrAfter(Version.V_5_1_1)) {
assertThat(actual.getSliceStatuses(), Matchers.hasSize(expected.getSliceStatuses().size()));
for (int i = 0; i < expected.getSliceStatuses().size(); i++) {
BulkByScrollTask.StatusOrException sliceStatus = expected.getSliceStatuses().get(i);
diff --git a/core/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java b/core/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java
index c79a61a22b..c047235ada 100644
--- a/core/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/termvectors/TermVectorsServiceTests.java
@@ -19,6 +19,9 @@
package org.elasticsearch.index.termvectors;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.common.settings.Settings;
@@ -28,6 +31,7 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESSingleNodeTestCase;
+import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
@@ -71,4 +75,45 @@ public class TermVectorsServiceTests extends ESSingleNodeTestCase {
assertThat(response, notNullValue());
assertThat(response.getTookInMillis(), equalTo(TimeUnit.NANOSECONDS.toMillis(longs.get(1) - longs.get(0))));
}
+
+ public void testDocFreqs() throws IOException {
+ XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .startObject("properties")
+ .startObject("text")
+ .field("type", "text")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ Settings settings = Settings.builder()
+ .put("number_of_shards", 1)
+ .build();
+ createIndex("test", settings, "doc", mapping);
+ ensureGreen();
+
+ int max = between(3, 10);
+ BulkRequestBuilder bulk = client().prepareBulk();
+ for (int i = 0; i < max; i++) {
+ bulk.add(client().prepareIndex("test", "doc", Integer.toString(i))
+ .setSource("text", "the quick brown fox jumped over the lazy dog"));
+ }
+ bulk.get();
+
+ TermVectorsRequest request = new TermVectorsRequest("test", "doc", "0").termStatistics(true);
+
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService(resolveIndex("test"));
+ IndexShard shard = test.getShardOrNull(0);
+ assertThat(shard, notNullValue());
+ TermVectorsResponse response = TermVectorsService.getTermVectors(shard, request);
+
+ Terms terms = response.getFields().terms("text");
+ TermsEnum iterator = terms.iterator();
+ while (iterator.next() != null) {
+ assertEquals(max, iterator.docFreq());
+ }
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
index 2572b7aeb0..298c8938dd 100644
--- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java
@@ -23,11 +23,8 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
import org.apache.lucene.analysis.hunspell.Dictionary;
-import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.store.Directory;
@@ -127,12 +124,6 @@ public class AnalysisModuleTests extends ESTestCase {
testSimpleConfiguration(settings);
}
- public void testDefaultFactoryTokenFilters() throws IOException {
- assertTokenFilter("keyword_repeat", KeywordRepeatFilter.class);
- assertTokenFilter("persian_normalization", PersianNormalizationFilter.class);
- assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class);
- }
-
public void testAnalyzerAliasNotAllowedPost5x() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.analyzer.foobar.type", "standard")
diff --git a/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java
index 1f56e4cfc5..dbfa5fb2d0 100644
--- a/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsIT.java
@@ -30,6 +30,7 @@ import java.util.Arrays;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
@@ -66,7 +67,8 @@ public class IndicesExistsIT extends ESIntegTestCase {
createIndex("ro");
// Request is not blocked
- for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY,
+ SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("ro", blockSetting);
assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true));
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java
index d53230d3a9..cb45a639c0 100644
--- a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksIT.java
@@ -30,6 +30,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.hamcrest.Matchers.equalTo;
@@ -43,7 +44,7 @@ public class GetSettingsBlocksIT extends ESIntegTestCase {
.put("index.merge.policy.expunge_deletes_allowed", "30")
.put(MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey(), false)));
- for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE)) {
try {
enableIndexBlock("test", block);
GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get();
diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
index 8dbaaf3e94..a867425f39 100644
--- a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java
@@ -44,6 +44,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
@@ -190,7 +191,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
assertIndexIsOpened("test1", "test2", "test3");
}
-
+
public void testCloseNoIndex() {
Client client = client();
Exception e = expectThrows(ActionRequestValidationException.class, () ->
@@ -380,7 +381,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
assertIndexIsClosed("test");
// Opening an index is blocked
- for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_READ_ONLY_ALLOW_DELETE, SETTING_BLOCKS_METADATA)) {
try {
enableIndexBlock("test", blockSetting);
assertBlocked(client().admin().indices().prepareOpen("test"));
diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
index f41d01b32c..6ca6b0ea8c 100644
--- a/core/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
+++ b/core/src/test/java/org/elasticsearch/ingest/PipelineConfigurationTests.java
@@ -54,9 +54,9 @@ public class PipelineConfigurationTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("ATECe30AAAA=");
- final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
- Version.V_5_0_3_UNRELEASED, Version.V_5_1_1_UNRELEASED, Version.V_5_1_2_UNRELEASED, Version.V_5_2_0_UNRELEASED);
try (StreamInput in = StreamInput.wrap(data)) {
+ final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_1, Version.V_5_0_2,
+ Version.V_5_1_1, Version.V_5_1_2, Version.V_5_2_0);
in.setVersion(version);
PipelineConfiguration configuration = PipelineConfiguration.readFrom(in);
assertEquals(XContentType.JSON, configuration.getXContentType());
diff --git a/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java b/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java
deleted file mode 100644
index c455870779..0000000000
--- a/core/src/test/java/org/elasticsearch/script/IndexLookupIT.java
+++ /dev/null
@@ -1,1029 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.script;
-
-import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.ShardSearchFailure;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.search.SearchHit;
-import org.elasticsearch.search.lookup.IndexField;
-import org.elasticsearch.search.lookup.IndexFieldTerm;
-import org.elasticsearch.search.lookup.IndexLookup;
-import org.elasticsearch.search.lookup.LeafIndexLookup;
-import org.elasticsearch.search.lookup.TermPosition;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.hamcrest.Matchers;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.function.Function;
-
-import static java.util.Collections.emptyList;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.hamcrest.Matchers.containsString;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-
-public class IndexLookupIT extends ESIntegTestCase {
-
- private static final String INCLUDE_ALL = "_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS|_CACHE";
- private static final int ALL_FLAGS = IndexLookup.FLAG_FREQUENCIES
- | IndexLookup.FLAG_OFFSETS
- | IndexLookup.FLAG_PAYLOADS
- | IndexLookup.FLAG_POSITIONS
- | IndexLookup.FLAG_CACHE;
-
- private static final String INCLUDE_ALL_BUT_CACHE = "_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS";
- private static final int ALL_FLAGS_WITHOUT_CACHE = IndexLookup.FLAG_FREQUENCIES
- | IndexLookup.FLAG_OFFSETS
- | IndexLookup.FLAG_PAYLOADS
- | IndexLookup.FLAG_POSITIONS;
-
- private HashMap<String, List<Object>> expectedEndOffsetsArray;
- private HashMap<String, List<Object>> expectedPayloadsArray;
- private HashMap<String, List<Object>> expectedPositionsArray;
- private HashMap<String, List<Object>> expectedStartOffsetsArray;
-
- @Override
- protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Collections.singleton(CustomScriptPlugin.class);
- }
-
- public static class CustomScriptPlugin extends MockScriptPlugin {
-
- @Override
- @SuppressWarnings("unchecked")
- protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
- Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>();
-
- scripts.put("term = _index['int_payload_field']['c']; term.tf()", vars -> tf(vars, "int_payload_field", "c"));
- scripts.put("term = _index['int_payload_field']['b']; term.tf()", vars -> tf(vars, "int_payload_field", "b"));
-
- scripts.put("Sum the payloads of [float_payload_field][b]", vars -> payloadSum(vars, "float_payload_field", "b"));
- scripts.put("Sum the payloads of [int_payload_field][b]", vars -> payloadSum(vars, "int_payload_field", "b"));
-
- scripts.put("createPositionsArrayScriptIterateTwice[b," + INCLUDE_ALL + ",position]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS, p -> p.position));
- scripts.put("createPositionsArrayScriptIterateTwice[b," + INCLUDE_ALL + ",startOffset]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS, p -> p.startOffset));
- scripts.put("createPositionsArrayScriptIterateTwice[b," + INCLUDE_ALL + ",endOffset]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS, p -> p.endOffset));
- scripts.put("createPositionsArrayScriptIterateTwice[b," + INCLUDE_ALL + ",payloadAsInt(-1)]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScriptIterateTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,position]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.position));
- scripts.put("createPositionsArrayScriptIterateTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,startOffset]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.startOffset));
- scripts.put("createPositionsArrayScriptIterateTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,endOffset]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.endOffset));
- scripts.put("createPositionsArrayScriptIterateTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScriptIterateTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScriptGetInfoObjectTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,position]",
- vars -> createPositionsArrayScriptGetInfoObjectTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.position));
- scripts.put("createPositionsArrayScriptGetInfoObjectTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,startOffset]",
- vars -> createPositionsArrayScriptGetInfoObjectTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.startOffset));
- scripts.put("createPositionsArrayScriptGetInfoObjectTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,endOffset]",
- vars -> createPositionsArrayScriptGetInfoObjectTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.endOffset));
- scripts.put("createPositionsArrayScriptGetInfoObjectTwice[b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScriptGetInfoObjectTwice(vars, "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScript[int_payload_field,b,_POSITIONS,position]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_POSITIONS, p -> p.position));
-
- scripts.put("createPositionsArrayScript[int_payload_field,b,_OFFSETS,position]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_OFFSETS, p -> p.position));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_OFFSETS,startOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_OFFSETS, p -> p.startOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_OFFSETS,endOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_OFFSETS, p -> p.endOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_OFFSETS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_OFFSETS, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScript[int_payload_field,b,_PAYLOADS,position]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_PAYLOADS, p -> p.position));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_PAYLOADS,startOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_PAYLOADS, p -> p.startOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_PAYLOADS,endOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_PAYLOADS, p -> p.endOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_PAYLOADS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", IndexLookup.FLAG_PAYLOADS, p -> p.payloadAsInt(-1)));
-
- int posoffpay = IndexLookup.FLAG_POSITIONS|IndexLookup.FLAG_OFFSETS|IndexLookup.FLAG_PAYLOADS;
- scripts.put("createPositionsArrayScript[int_payload_field,b,_POSITIONS|_OFFSETS|_PAYLOADS,position]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", posoffpay, p -> p.position));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_POSITIONS|_OFFSETS|_PAYLOADS,startOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", posoffpay, p -> p.startOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_POSITIONS|_OFFSETS|_PAYLOADS,endOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", posoffpay, p -> p.endOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_POSITIONS|_OFFSETS|_PAYLOADS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", posoffpay, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScript[int_payload_field,b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,position]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.position));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,startOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.startOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,endOffset]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.endOffset));
- scripts.put("createPositionsArrayScript[int_payload_field,b,_FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS,payloadAsInt(-1)]",
- vars -> createPositionsArrayScript(vars, "int_payload_field", "b", ALL_FLAGS_WITHOUT_CACHE, p -> p.payloadAsInt(-1)));
-
- scripts.put("createPositionsArrayScript" +
- "[float_payload_field,b," + INCLUDE_ALL + ",payloadAsFloat(-1)]",
- vars -> createPositionsArrayScript(vars,"float_payload_field", "b", ALL_FLAGS, p -> p.payloadAsFloat(-1)));
- scripts.put("createPositionsArrayScript" +
- "[string_payload_field,b," + INCLUDE_ALL + ",payloadAsString()]",
- vars -> createPositionsArrayScript(vars,"string_payload_field", "b", ALL_FLAGS, TermPosition::payloadAsString));
- scripts.put("createPositionsArrayScript" +
- "[int_payload_field,c," + INCLUDE_ALL + ",payloadAsInt(-1)]",
- vars -> createPositionsArrayScript(vars,"int_payload_field", "c", ALL_FLAGS, p -> p.payloadAsInt(-1)));
-
- // Call with different flags twice, equivalent to:
- // term = _index['int_payload_field']['b']; return _index['int_payload_field'].get('b', _POSITIONS).tf();
- scripts.put("Call with different flags twice", vars -> {
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- IndexField indexField = leafIndexLookup.get("int_payload_field");
-
- // 1st call
- indexField.get("b");
- try {
- // 2nd call, must throws an exception
- return indexField.get("b", IndexLookup.FLAG_POSITIONS).tf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "Call with different flags twice", CustomScriptPlugin.NAME);
- }
- });
-
- // Call with same flags twice: equivalent to:
- // term = _index['int_payload_field'].get('b', _POSITIONS | _FREQUENCIES);return _index['int_payload_field']['b'].tf();
- scripts.put("Call with same flags twice", vars -> {
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- IndexField indexField = leafIndexLookup.get("int_payload_field");
-
- // 1st call
- indexField.get("b", IndexLookup.FLAG_POSITIONS | IndexLookup.FLAG_FREQUENCIES);
- try {
- // 2nd call, must throws an exception
- return indexField.get("b").tf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "Call with same flags twice", CustomScriptPlugin.NAME);
- }
- });
-
- // get the number of all docs
- scripts.put("_index.numDocs()",
- vars -> ((LeafIndexLookup) vars.get("_index")).numDocs());
-
- // get the number of docs with field float_payload_field
- scripts.put("_index['float_payload_field'].docCount()",
- vars -> indexFieldScript(vars, "float_payload_field", indexField -> {
- try {
- return indexField.docCount();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "docCount()", CustomScriptPlugin.NAME);
- }
- }));
-
- // corner case: what if the field does not exist?
- scripts.put("_index['non_existent_field'].docCount()",
- vars -> indexFieldScript(vars, "non_existent_field", indexField -> {
- try {
- return indexField.docCount();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "docCount()", CustomScriptPlugin.NAME);
- }
- }));
-
- // get the number of all tokens in all docs
- scripts.put("_index['float_payload_field'].sumttf()",
- vars -> indexFieldScript(vars, "float_payload_field", indexField -> {
- try {
- return indexField.sumttf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "sumttf()", CustomScriptPlugin.NAME);
- }
- }));
-
- // corner case get the number of all tokens in all docs for non existent
- // field
- scripts.put("_index['non_existent_field'].sumttf()",
- vars -> indexFieldScript(vars, "non_existent_field", indexField -> {
- try {
- return indexField.sumttf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "sumttf()", CustomScriptPlugin.NAME);
- }
- }));
-
- // get the sum of doc freqs in all docs
- scripts.put("_index['float_payload_field'].sumdf()",
- vars -> indexFieldScript(vars, "float_payload_field", indexField -> {
- try {
- return indexField.sumdf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "sumdf()", CustomScriptPlugin.NAME);
- }
- }));
-
- // get the sum of doc freqs in all docs for non existent field
- scripts.put("_index['non_existent_field'].sumdf()",
- vars -> indexFieldScript(vars, "non_existent_field", indexField -> {
- try {
- return indexField.sumdf();
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "sumdf()", CustomScriptPlugin.NAME);
- }
- }));
-
- // check term frequencies for 'a'
- scripts.put("term = _index['float_payload_field']['a']; if (term != null) {term.tf()}",
- vars -> indexFieldTermScript(vars, "float_payload_field", "a", indexFieldTerm -> {
- try {
- if (indexFieldTerm != null) {
- return indexFieldTerm.tf();
- }
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "term.tf()", CustomScriptPlugin.NAME);
- }
- return null;
- }));
-
- // check doc frequencies for 'c'
- scripts.put("term = _index['float_payload_field']['c']; if (term != null) {term.df()}",
- vars -> indexFieldTermScript(vars, "float_payload_field", "c", indexFieldTerm -> {
- try {
- if (indexFieldTerm != null) {
- return indexFieldTerm.df();
- }
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "term.df()", CustomScriptPlugin.NAME);
- }
- return null;
- }));
-
- // check doc frequencies for term that does not exist
- scripts.put("term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}",
- vars -> indexFieldTermScript(vars, "float_payload_field", "non_existent_term", indexFieldTerm -> {
- try {
- if (indexFieldTerm != null) {
- return indexFieldTerm.df();
- }
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "term.df()", CustomScriptPlugin.NAME);
- }
- return null;
- }));
-
- // check doc frequencies for term that does not exist
- scripts.put("term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}",
- vars -> indexFieldTermScript(vars, "non_existent_field", "non_existent_term", indexFieldTerm -> {
- try {
- if (indexFieldTerm != null) {
- return indexFieldTerm.tf();
- }
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "term.tf()", CustomScriptPlugin.NAME);
- }
- return null;
- }));
-
- // check total term frequencies for 'a'
- scripts.put("term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}",
- vars -> indexFieldTermScript(vars, "float_payload_field", "a", indexFieldTerm -> {
- try {
- if (indexFieldTerm != null) {
- return indexFieldTerm.ttf();
- }
- } catch (IOException e) {
- throw new ScriptException(e.getMessage(), e, emptyList(), "term.ttf()", CustomScriptPlugin.NAME);
- }
- return null;
- }));
-
- return scripts;
- }
-
- @SuppressWarnings("unchecked")
- static Object indexFieldScript(Map<String, Object> vars, String fieldName, Function<IndexField, Object> fn) {
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- return fn.apply(leafIndexLookup.get(fieldName));
- }
-
- @SuppressWarnings("unchecked")
- static Object indexFieldTermScript(Map<String, Object> vars, String fieldName, String term, Function<IndexFieldTerm, Object> fn) {
- return indexFieldScript(vars, fieldName, indexField -> fn.apply(indexField.get(term)));
- }
-
- @SuppressWarnings("unchecked")
- static Object tf(Map<String, Object> vars, String fieldName, String term) {
- return indexFieldTermScript(vars, fieldName, term, indexFieldTerm -> {
- try {
- return indexFieldTerm.tf();
- } catch (IOException e) {
- throw new RuntimeException("Mocked script error when retrieving TF for [" + fieldName + "][" + term + "]");
- }
- });
- }
-
- // Sum the payloads for a given field term, equivalent to:
- // term = _index['float_payload_field'].get('b', _FREQUENCIES|_OFFSETS|_PAYLOADS|_POSITIONS|_CACHE);
- // payloadSum=0;
- // for (pos in term) {
- // payloadSum += pos.payloadAsInt(0)
- // };
- // return payloadSum;
- @SuppressWarnings("unchecked")
- static Object payloadSum(Map<String, Object> vars, String fieldName, String term) {
- return indexFieldScript(vars, fieldName, indexField -> {
- IndexFieldTerm indexFieldTerm = indexField.get(term, IndexLookup.FLAG_FREQUENCIES
- | IndexLookup.FLAG_OFFSETS
- | IndexLookup.FLAG_PAYLOADS
- | IndexLookup.FLAG_POSITIONS
- | IndexLookup.FLAG_CACHE);
- int payloadSum = 0;
- for (TermPosition position : indexFieldTerm) {
- payloadSum += position.payloadAsInt(0);
- }
- return payloadSum;
- });
- }
-
- @SuppressWarnings("unchecked")
- static List<Object> createPositionsArrayScriptGetInfoObjectTwice(Map<String, Object> vars, String term, int flags,
- Function<TermPosition, Object> fn) {
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- IndexField indexField = leafIndexLookup.get("int_payload_field");
-
- // 1st call
- IndexFieldTerm indexFieldTerm = indexField.get(term, flags);
-
- List<Object> array = new ArrayList<>();
- for (TermPosition position : indexFieldTerm) {
- array.add(fn.apply(position));
- }
-
- // 2nd call
- indexField.get(term, flags);
-
- array = new ArrayList<>();
- for (TermPosition position : indexFieldTerm) {
- array.add(fn.apply(position));
- }
-
- return array;
- }
-
- @SuppressWarnings("unchecked")
- static List<Object> createPositionsArrayScriptIterateTwice(Map<String, Object> vars, String term, int flags,
- Function<TermPosition, Object> fn) {
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- IndexField indexField = leafIndexLookup.get("int_payload_field");
-
- IndexFieldTerm indexFieldTerm = indexField.get(term, flags);
-
- // 1st iteration
- List<Object> array = new ArrayList<>();
- for (TermPosition position : indexFieldTerm) {
- array.add(fn.apply(position));
- }
-
- // 2nd iteration
- array = new ArrayList<>();
- for (TermPosition position : indexFieldTerm) {
- array.add(fn.apply(position));
- }
-
- return array;
- }
-
- @SuppressWarnings("unchecked")
- static List<Object> createPositionsArrayScript(Map<String, Object> vars, String field, String term, int flags,
- Function<TermPosition, Object> fn) {
-
- LeafIndexLookup leafIndexLookup = (LeafIndexLookup) vars.get("_index");
- IndexField indexField = leafIndexLookup.get(field);
-
- IndexFieldTerm indexFieldTerm = indexField.get(term, flags);
- List<Object> array = new ArrayList<>();
- for (TermPosition position : indexFieldTerm) {
- array.add(fn.apply(position));
- }
- return array;
- }
- }
-
- void initTestData() throws InterruptedException, ExecutionException, IOException {
- HashMap<String, List<Object>> emptyArray = new HashMap<>();
- List<Object> empty1 = new ArrayList<>();
- empty1.add(-1);
- empty1.add(-1);
- emptyArray.put("1", empty1);
- List<Object> empty2 = new ArrayList<>();
- empty2.add(-1);
- empty2.add(-1);
- emptyArray.put("2", empty2);
- List<Object> empty3 = new ArrayList<>();
- empty3.add(-1);
- empty3.add(-1);
- emptyArray.put("3", empty3);
-
- expectedPositionsArray = new HashMap<>();
-
- List<Object> pos1 = new ArrayList<>();
- pos1.add(1);
- pos1.add(2);
- expectedPositionsArray.put("1", pos1);
- List<Object> pos2 = new ArrayList<>();
- pos2.add(0);
- pos2.add(1);
- expectedPositionsArray.put("2", pos2);
- List<Object> pos3 = new ArrayList<>();
- pos3.add(0);
- pos3.add(4);
- expectedPositionsArray.put("3", pos3);
-
- expectedPayloadsArray = new HashMap<>();
- List<Object> pay1 = new ArrayList<>();
- pay1.add(2);
- pay1.add(3);
- expectedPayloadsArray.put("1", pay1);
- List<Object> pay2 = new ArrayList<>();
- pay2.add(1);
- pay2.add(2);
- expectedPayloadsArray.put("2", pay2);
- List<Object> pay3 = new ArrayList<>();
- pay3.add(1);
- pay3.add(-1);
- expectedPayloadsArray.put("3", pay3);
- /*
- * "a|1 b|2 b|3 c|4 d " "b|1 b|2 c|3 d|4 a " "b|1 c|2 d|3 a|4 b "
- */
- expectedStartOffsetsArray = new HashMap<>();
- List<Object> starts1 = new ArrayList<>();
- starts1.add(4);
- starts1.add(8);
- expectedStartOffsetsArray.put("1", starts1);
- List<Object> starts2 = new ArrayList<>();
- starts2.add(0);
- starts2.add(4);
- expectedStartOffsetsArray.put("2", starts2);
- List<Object> starts3 = new ArrayList<>();
- starts3.add(0);
- starts3.add(16);
- expectedStartOffsetsArray.put("3", starts3);
-
- expectedEndOffsetsArray = new HashMap<>();
- List<Object> ends1 = new ArrayList<>();
- ends1.add(7);
- ends1.add(11);
- expectedEndOffsetsArray.put("1", ends1);
- List<Object> ends2 = new ArrayList<>();
- ends2.add(3);
- ends2.add(7);
- expectedEndOffsetsArray.put("2", ends2);
- List<Object> ends3 = new ArrayList<>();
- ends3.add(3);
- ends3.add(17);
- expectedEndOffsetsArray.put("3", ends3);
-
- XContentBuilder mapping = XContentFactory.jsonBuilder()
- .startObject()
- .startObject("type1")
- .startObject("properties")
- .startObject("int_payload_field")
- .field("type", "text")
- .field("index_options", "offsets")
- .field("analyzer", "payload_int")
- .endObject()
- .endObject()
- .endObject()
- .endObject();
-
- assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
- Settings.builder()
- .put(indexSettings())
- .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
- .put("index.analysis.filter.delimited_int.delimiter", "|")
- .put("index.analysis.filter.delimited_int.encoding", "int")
- .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")));
- indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("int_payload_field", "a|1 b|2 b|3 c|4 d "), client()
- .prepareIndex("test", "type1", "2").setSource("int_payload_field", "b|1 b|2 c|3 d|4 a "),
- client().prepareIndex("test", "type1", "3").setSource("int_payload_field", "b|1 c|2 d|3 a|4 b "));
- ensureGreen();
- }
-
- public void testTwoScripts() throws Exception {
- initTestData();
-
- Script scriptFieldScript = createScript("term = _index['int_payload_field']['c']; term.tf()");
- Script scoreScript = createScript("term = _index['int_payload_field']['b']; term.tf()");
- Map<String, Object> expectedResultsField = new HashMap<>();
- expectedResultsField.put("1", 1);
- expectedResultsField.put("2", 1);
- expectedResultsField.put("3", 1);
- Map<String, Object> expectedResultsScore = new HashMap<>();
- expectedResultsScore.put("1", 2f);
- expectedResultsScore.put("2", 2f);
- expectedResultsScore.put("3", 2f);
- checkOnlyFunctionScore(scoreScript, expectedResultsScore, 3);
- checkValueInEachDocWithFunctionScore(scriptFieldScript, expectedResultsField, scoreScript, expectedResultsScore, 3);
-
- }
-
- public void testCallWithDifferentFlagsFails() throws Exception {
- initTestData();
- final int numPrimaries = getNumShards("test").numPrimaries;
- final String expectedError = "You must call get with all required flags! " +
- "Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS)" +
- " call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]";
-
- // should throw an exception, we cannot call with different flags twice
- // if the flags of the second call were not included in the first call.
- Script script = createScript("Call with different flags twice");
- try {
- SearchResponse response = client().prepareSearch("test")
- .setQuery(QueryBuilders.matchAllQuery())
- .addScriptField("tvtest", script)
- .get();
-
- // (partial) success when at least one shard succeeds
- assertThat(numPrimaries, greaterThan(response.getShardFailures().length));
- assertThat(response.getFailedShards(), greaterThanOrEqualTo(1));
-
- for (ShardSearchFailure failure : response.getShardFailures()) {
- assertThat(failure.reason(), containsString(expectedError));
- }
- } catch (SearchPhaseExecutionException e) {
- // Exception thrown when *all* shards fail
- assertThat(numPrimaries, equalTo(e.shardFailures().length));
- for (ShardSearchFailure failure : e.shardFailures()) {
- assertThat(failure.reason(), containsString(expectedError));
- }
- }
-
- // Should not throw an exception this way round
- script = createScript("Call with same flags twice");
- assertThat(client().prepareSearch("test")
- .setQuery(QueryBuilders.matchAllQuery())
- .addScriptField("tvtest", script)
- .get().getHits().getTotalHits(), greaterThan(0L));
- }
-
- private void checkOnlyFunctionScore(Script scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
- SearchResponse sr = client().prepareSearch("test")
- .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript))).execute()
- .actionGet();
- assertHitCount(sr, numExpectedDocs);
- for (SearchHit hit : sr.getHits().getHits()) {
- assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
- Matchers.closeTo(hit.getScore(), 1.e-4));
- }
- }
-
- public void testDocumentationExample() throws Exception {
- initTestData();
-
- Script script = createScript("Sum the payloads of [float_payload_field][b]");
-
- // non existing field: sum should be 0
- HashMap<String, Object> zeroArray = new HashMap<>();
- zeroArray.put("1", 0);
- zeroArray.put("2", 0);
- zeroArray.put("3", 0);
- checkValueInEachDoc(script, zeroArray, 3);
-
- script = createScript("Sum the payloads of [int_payload_field][b]");
-
- // existing field: sums should be as here:
- zeroArray.put("1", 5);
- zeroArray.put("2", 3);
- zeroArray.put("3", 1);
- checkValueInEachDoc(script, zeroArray, 3);
- }
-
- public void testIteratorAndRecording() throws Exception {
- initTestData();
-
- // call twice with record: should work as expected
- Script script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL, "position");
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL, "startOffset");
- checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL, "endOffset");
- checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL, "payloadAsInt(-1)");
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
-
- // no record and get iterator twice: should fail
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL_BUT_CACHE, "position");
- checkExceptions(script);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL_BUT_CACHE, "startOffset");
- checkExceptions(script);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL_BUT_CACHE, "endOffset");
- checkExceptions(script);
- script = createPositionsArrayScriptIterateTwice("b", INCLUDE_ALL_BUT_CACHE, "payloadAsInt(-1)");
- checkExceptions(script);
-
- // no record and get termObject twice and iterate: should fail
- script = createPositionsArrayScriptGetInfoObjectTwice("b", INCLUDE_ALL_BUT_CACHE, "position");
- checkExceptions(script);
- script = createPositionsArrayScriptGetInfoObjectTwice("b", INCLUDE_ALL_BUT_CACHE, "startOffset");
- checkExceptions(script);
- script = createPositionsArrayScriptGetInfoObjectTwice("b", INCLUDE_ALL_BUT_CACHE, "endOffset");
- checkExceptions(script);
- script = createPositionsArrayScriptGetInfoObjectTwice("b", INCLUDE_ALL_BUT_CACHE, "payloadAsInt(-1)");
- checkExceptions(script);
-
- }
-
- private Script createPositionsArrayScriptGetInfoObjectTwice(String term, String flags, String what) {
- return createScript("createPositionsArrayScriptGetInfoObjectTwice[" + term + "," + flags + "," + what + "]");
- }
-
- private Script createPositionsArrayScriptIterateTwice(String term, String flags, String what) {
- return createScript("createPositionsArrayScriptIterateTwice[" + term + "," + flags + "," + what + "]");
- }
-
- private Script createPositionsArrayScript(String field, String term, String flags, String what) {
- return createScript("createPositionsArrayScript[" + field + "," + term + "," + flags + "," + what + "]");
- }
-
- private Script createPositionsArrayScriptDefaultGet(String field, String term, String what) {
- return createScript("createPositionsArrayScriptDefaultGet[" + field + "," + term + "," + what + "]");
- }
-
- private Script createScript(String script) {
- return new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, script, Collections.emptyMap());
- }
-
- public void testFlags() throws Exception {
- initTestData();
-
- // check default flag
- Script script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "position");
- // there should be no positions
- /* TODO: the following tests fail with the new postings enum apis because of a bogus assert in BlockDocsEnum
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "startOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "endOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "payloadAsInt(-1)");
- // there should be no payload
- checkArrayValsInEachDoc(script, emptyArray, 3);
-
- // check FLAG_FREQUENCIES flag
- script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "position");
- // there should be no positions
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "startOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "endOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "payloadAsInt(-1)");
- // there should be no payloads
- checkArrayValsInEachDoc(script, emptyArray, 3);*/
-
- // check FLAG_POSITIONS flag
- script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "position");
- // there should be positions
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- /* TODO: these tests make a bogus assumption that asking for positions will return only positions
- script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "startOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "endOffset");
- // there should be no offsets
- checkArrayValsInEachDoc(script, emptyArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "payloadAsInt(-1)");
- // there should be no payloads
- checkArrayValsInEachDoc(script, emptyArray, 3);*/
-
- // check FLAG_OFFSETS flag
- script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "position");
- // there should be positions and s forth ...
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "startOffset");
- checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "endOffset");
- checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "payloadAsInt(-1)");
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
-
- // check FLAG_PAYLOADS flag
- script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "position");
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "startOffset");
- checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "endOffset");
- checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "payloadAsInt(-1)");
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
-
- // check all flags
- String allFlags = "_POSITIONS|_OFFSETS|_PAYLOADS";
- script = createPositionsArrayScript("int_payload_field", "b", allFlags, "position");
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", allFlags, "startOffset");
- checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", allFlags, "endOffset");
- checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", allFlags, "payloadAsInt(-1)");
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
-
- // check all flags without record
- script = createPositionsArrayScript("int_payload_field", "b", INCLUDE_ALL_BUT_CACHE, "position");
- checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", INCLUDE_ALL_BUT_CACHE, "startOffset");
- checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", INCLUDE_ALL_BUT_CACHE, "endOffset");
- checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
- script = createPositionsArrayScript("int_payload_field", "b", INCLUDE_ALL_BUT_CACHE, "payloadAsInt(-1)");
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
-
- }
-
- private void checkArrayValsInEachDoc(Script script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
- SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
- .execute().actionGet();
- assertHitCount(sr, expectedHitSize);
- int nullCounter = 0;
- for (SearchHit hit : sr.getHits().getHits()) {
- Object result = hit.getFields().get("tvtest").getValues();
- Object expectedResult = expectedArray.get(hit.getId());
- assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
- if (expectedResult != null) {
- nullCounter++;
- }
- }
- assertThat(nullCounter, equalTo(expectedArray.size()));
- }
-
- public void testAllExceptPosAndOffset() throws Exception {
- XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
- .startObject("float_payload_field").field("type", "text").field("index_options", "offsets").field("term_vector", "no")
- .field("analyzer", "payload_float").endObject().startObject("string_payload_field").field("type", "text")
- .field("index_options", "offsets").field("term_vector", "no").field("analyzer", "payload_string").endObject()
- .startObject("int_payload_field").field("type", "text").field("index_options", "offsets")
- .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
- assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
- Settings.builder()
- .put(indexSettings())
- .put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
- .put("index.analysis.filter.delimited_float.delimiter", "|")
- .put("index.analysis.filter.delimited_float.encoding", "float")
- .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
- .put("index.analysis.analyzer.payload_string.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.payload_string.filter", "delimited_string")
- .put("index.analysis.filter.delimited_string.delimiter", "|")
- .put("index.analysis.filter.delimited_string.encoding", "identity")
- .put("index.analysis.filter.delimited_string.type", "delimited_payload_filter")
- .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
- .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
- .put("index.analysis.filter.delimited_int.delimiter", "|")
- .put("index.analysis.filter.delimited_int.encoding", "int")
- .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")
- .put("index.number_of_shards", 1)));
- indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("float_payload_field", "a|1 b|2 a|3 b "), client()
- .prepareIndex("test", "type1", "2").setSource("string_payload_field", "a|a b|b a|a b "),
- client().prepareIndex("test", "type1", "3").setSource("float_payload_field", "a|4 b|5 a|6 b "),
- client().prepareIndex("test", "type1", "4").setSource("string_payload_field", "a|b b|a a|b b "),
- client().prepareIndex("test", "type1", "5").setSource("float_payload_field", "c "),
- client().prepareIndex("test", "type1", "6").setSource("int_payload_field", "c|1"));
-
- // get the number of all docs
- Script script = createScript("_index.numDocs()");
- checkValueInEachDoc(6, script, 6);
-
- // get the number of docs with field float_payload_field
- script = createScript("_index['float_payload_field'].docCount()");
- checkValueInEachDoc(3, script, 6);
-
- // corner case: what if the field does not exist?
- script = createScript("_index['non_existent_field'].docCount()");
- checkValueInEachDoc(0, script, 6);
-
- // get the number of all tokens in all docs
- script = createScript("_index['float_payload_field'].sumttf()");
- checkValueInEachDoc(9, script, 6);
-
- // corner case get the number of all tokens in all docs for non existent
- // field
- script = createScript("_index['non_existent_field'].sumttf()");
- checkValueInEachDoc(0, script, 6);
-
- // get the sum of doc freqs in all docs
- script = createScript("_index['float_payload_field'].sumdf()");
- checkValueInEachDoc(5, script, 6);
-
- // get the sum of doc freqs in all docs for non existent field
- script = createScript("_index['non_existent_field'].sumdf()");
- checkValueInEachDoc(0, script, 6);
-
- // check term frequencies for 'a'
- script = createScript("term = _index['float_payload_field']['a']; if (term != null) {term.tf()}");
- Map<String, Object> expectedResults = new HashMap<>();
- expectedResults.put("1", 2);
- expectedResults.put("2", 0);
- expectedResults.put("3", 2);
- expectedResults.put("4", 0);
- expectedResults.put("5", 0);
- expectedResults.put("6", 0);
- checkValueInEachDoc(script, expectedResults, 6);
- expectedResults.clear();
-
- // check doc frequencies for 'c'
- script = createScript("term = _index['float_payload_field']['c']; if (term != null) {term.df()}");
- expectedResults.put("1", 1L);
- expectedResults.put("2", 1L);
- expectedResults.put("3", 1L);
- expectedResults.put("4", 1L);
- expectedResults.put("5", 1L);
- expectedResults.put("6", 1L);
- checkValueInEachDoc(script, expectedResults, 6);
- expectedResults.clear();
-
- // check doc frequencies for term that does not exist
- script = createScript("term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}");
- expectedResults.put("1", 0L);
- expectedResults.put("2", 0L);
- expectedResults.put("3", 0L);
- expectedResults.put("4", 0L);
- expectedResults.put("5", 0L);
- expectedResults.put("6", 0L);
- checkValueInEachDoc(script, expectedResults, 6);
- expectedResults.clear();
-
- // check doc frequencies for term that does not exist
- script = createScript("term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}");
- expectedResults.put("1", 0);
- expectedResults.put("2", 0);
- expectedResults.put("3", 0);
- expectedResults.put("4", 0);
- expectedResults.put("5", 0);
- expectedResults.put("6", 0);
- checkValueInEachDoc(script, expectedResults, 6);
- expectedResults.clear();
-
- // check total term frequencies for 'a'
- script = createScript("term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}");
- expectedResults.put("1", 4L);
- expectedResults.put("2", 4L);
- expectedResults.put("3", 4L);
- expectedResults.put("4", 4L);
- expectedResults.put("5", 4L);
- expectedResults.put("6", 4L);
- checkValueInEachDoc(script, expectedResults, 6);
- expectedResults.clear();
-
- // check float payload for 'b'
- HashMap<String, List<Object>> expectedPayloadsArray = new HashMap<>();
- script = createPositionsArrayScript("float_payload_field", "b", INCLUDE_ALL, "payloadAsFloat(-1)");
- float missingValue = -1;
- List<Object> payloadsFor1 = new ArrayList<>();
- payloadsFor1.add(2f);
- payloadsFor1.add(missingValue);
- expectedPayloadsArray.put("1", payloadsFor1);
- List<Object> payloadsFor2 = new ArrayList<>();
- payloadsFor2.add(5f);
- payloadsFor2.add(missingValue);
- expectedPayloadsArray.put("3", payloadsFor2);
- expectedPayloadsArray.put("6", new ArrayList<>());
- expectedPayloadsArray.put("5", new ArrayList<>());
- expectedPayloadsArray.put("4", new ArrayList<>());
- expectedPayloadsArray.put("2", new ArrayList<>());
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
-
- // check string payload for 'b'
- expectedPayloadsArray.clear();
- payloadsFor1.clear();
- payloadsFor2.clear();
- script = createPositionsArrayScript("string_payload_field", "b", INCLUDE_ALL, "payloadAsString()");
- payloadsFor1.add("b");
- payloadsFor1.add(null);
- expectedPayloadsArray.put("2", payloadsFor1);
- payloadsFor2.add("a");
- payloadsFor2.add(null);
- expectedPayloadsArray.put("4", payloadsFor2);
- expectedPayloadsArray.put("6", new ArrayList<>());
- expectedPayloadsArray.put("5", new ArrayList<>());
- expectedPayloadsArray.put("3", new ArrayList<>());
- expectedPayloadsArray.put("1", new ArrayList<>());
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
-
- // check int payload for 'c'
- expectedPayloadsArray.clear();
- payloadsFor1.clear();
- payloadsFor2.clear();
- script = createPositionsArrayScript("int_payload_field", "c", INCLUDE_ALL, "payloadAsInt(-1)");
- payloadsFor1 = new ArrayList<>();
- payloadsFor1.add(1);
- expectedPayloadsArray.put("6", payloadsFor1);
- expectedPayloadsArray.put("5", new ArrayList<>());
- expectedPayloadsArray.put("4", new ArrayList<>());
- expectedPayloadsArray.put("3", new ArrayList<>());
- expectedPayloadsArray.put("2", new ArrayList<>());
- expectedPayloadsArray.put("1", new ArrayList<>());
- checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
-
- }
-
- private void checkExceptions(Script script) {
- try {
- SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
- .execute().actionGet();
- assertThat(sr.getHits().getHits().length, equalTo(0));
- ShardSearchFailure[] shardFails = sr.getShardFailures();
- for (ShardSearchFailure fail : shardFails) {
- assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
- Matchers.greaterThan(-1));
- }
- } catch (SearchPhaseExecutionException ex) {
- assertThat(
- "got " + ex.toString(),
- ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
- Matchers.greaterThan(-1));
- }
- }
-
- private void checkValueInEachDocWithFunctionScore(Script fieldScript, Map<String, Object> expectedFieldVals, Script scoreScript,
- Map<String, Object> expectedScore, int numExpectedDocs) {
- SearchResponse sr = client().prepareSearch("test")
- .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript)))
- .addScriptField("tvtest", fieldScript).execute().actionGet();
- assertHitCount(sr, numExpectedDocs);
- for (SearchHit hit : sr.getHits().getHits()) {
- Object result = hit.getFields().get("tvtest").getValues().get(0);
- Object expectedResult = expectedFieldVals.get(hit.getId());
- assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
- assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
- Matchers.closeTo(hit.getScore(), 1.e-4));
- }
- }
-
- private void checkValueInEachDoc(Script script, Map<String, Object> expectedResults, int numExpectedDocs) {
- SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
- .execute().actionGet();
- assertHitCount(sr, numExpectedDocs);
- for (SearchHit hit : sr.getHits().getHits()) {
- Object result = hit.getFields().get("tvtest").getValues().get(0);
- Object expectedResult = expectedResults.get(hit.getId());
- assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
- }
- }
-
- private void checkValueInEachDoc(int value, Script script, int numExpectedDocs) {
- SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
- .execute().actionGet();
- assertHitCount(sr, numExpectedDocs);
- for (SearchHit hit : sr.getHits().getHits()) {
- Object result = hit.getFields().get("tvtest").getValues().get(0);
- if (result instanceof Integer) {
- assertThat(result, equalTo(value));
- } else if (result instanceof Long) {
- assertThat(((Long) result).intValue(), equalTo(value));
- } else {
- fail();
- }
- }
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java
index 7f56f3de4b..0960bc71be 100644
--- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java
+++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java
@@ -55,6 +55,7 @@ public class NativeScriptTests extends ESTestCase {
CompiledScript compiledScript = scriptModule.getScriptService().compile(script, ScriptContext.Standard.SEARCH);
ExecutableScript executable = scriptModule.getScriptService().executable(compiledScript, script.getParams());
assertThat(executable.run().toString(), equalTo("test"));
+ assertWarnings("Native scripts are deprecated. Use a custom ScriptEngine to write scripts in java.");
}
public void testFineGrainedSettingsDontAffectNativeScripts() throws IOException {
@@ -82,6 +83,7 @@ public class NativeScriptTests extends ESTestCase {
assertThat(scriptService.compile(new Script(ScriptType.INLINE, NativeScriptEngine.NAME, "my", Collections.emptyMap()),
scriptContext), notNullValue());
}
+ assertWarnings("Native scripts are deprecated. Use a custom ScriptEngine to write scripts in java.");
}
public static class MyNativeScriptFactory implements NativeScriptFactory {
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
index 8a167c0daf..ce57596497 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
@@ -51,6 +52,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;
@@ -866,6 +868,19 @@ public class DateRangeIT extends ESIntegTestCase {
assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
}
+ public void testNoRangesInQuery() {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(dateRange("my_date_range_agg").field("value"))
+ .execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException spee){
+ Throwable rootCause = spee.getCause().getCause();
+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));
+ assertEquals(rootCause.getMessage(), "No [ranges] specified for the [my_date_range_agg] aggregation");
+ }
+ }
+
/**
* Make sure that a request using a script does not get cached and a request
* not using a script does get cached.
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
index d8aab691d2..032bb8d591 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoPoint;
@@ -52,6 +53,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogra
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.sameInstance;
import static org.hamcrest.core.IsNull.notNullValue;
@@ -441,6 +443,19 @@ public class GeoDistanceIT extends ESIntegTestCase {
assertThat(buckets.get(0).getDocCount(), equalTo(0L));
}
+ public void testNoRangesInQuery() {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)))
+ .execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException spee){
+ Throwable rootCause = spee.getCause().getCause();
+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));
+ assertEquals(rootCause.getMessage(), "No [ranges] specified for the [geo_dist] aggregation");
+ }
+ }
+
public void testMultiValues() throws Exception {
SearchResponse response = client().prepareSearch("idx-multi")
.addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894))
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
index 571a32b87b..67bec2acf7 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalAggregatorTests.java
@@ -78,17 +78,14 @@ public class GlobalAggregatorTests extends AggregatorTestCase {
aggregationBuilder.subAggregation(new MinAggregationBuilder("in_global").field("number"));
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (GlobalAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- try {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- InternalGlobal result = (InternalGlobal) aggregator.buildAggregation(0L);
- verify.accept(result, (InternalMin) result.getAggregations().asMap().get("in_global"));
- } finally {
- IOUtils.close(aggregator.subAggregators());
- }
- }
+
+ GlobalAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ InternalGlobal result = (InternalGlobal) aggregator.buildAggregation(0L);
+ verify.accept(result, (InternalMin) result.getAggregations().asMap().get("in_global"));
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
index cc4818963a..b9bb46501d 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java
@@ -18,18 +18,9 @@
*/
package org.elasticsearch.search.aggregations.bucket;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
-import static org.hamcrest.Matchers.containsString;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
@@ -42,6 +33,17 @@ import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.test.ESIntegTestCase;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
+
@ESIntegTestCase.SuiteScopeTestCase
public class IpRangeIT extends ESIntegTestCase {
@@ -221,6 +223,20 @@ public class IpRangeIT extends ESIntegTestCase {
assertThat(e.getMessage(), containsString("[ip_range] does not support scripts"));
}
+ public void testNoRangesInQuery() {
+ try {
+ client().prepareSearch("idx").addAggregation(
+ AggregationBuilders.ipRange("my_range")
+ .field("ip"))
+ .execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException spee){
+ Throwable rootCause = spee.getCause().getCause();
+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));
+ assertEquals(rootCause.getMessage(), "No [ranges] specified for the [my_range] aggregation");
+ }
+ }
+
public static class DummyScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public List<NativeScriptFactory> getNativeScripts() {
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
index b4bb3c819d..c2a2405098 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.ScriptDocValues;
@@ -53,6 +54,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;
@@ -661,6 +663,20 @@ public class RangeIT extends ESIntegTestCase {
assertThat(bucket.getDocCount(), equalTo(0L));
}
+ public void testNoRangesInQuery() {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(range("foobar")
+ .field(SINGLE_VALUED_FIELD_NAME))
+ .execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException spee){
+ Throwable rootCause = spee.getCause().getCause();
+ assertThat(rootCause, instanceOf(IllegalArgumentException.class));
+ assertEquals(rootCause.getMessage(), "No [ranges] specified for the [foobar] aggregation");
+ }
+ }
+
public void testScriptMultiValued() throws Exception {
Script script =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "'].values", Collections.emptyMap());
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
index 04147b245c..45b6b64cdd 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java
@@ -112,12 +112,13 @@ public class GeoHashGridAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
fieldType.setHasDocValues(true);
fieldType.setName(FIELD_NAME);
- try (Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
- }
+
+ Aggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalGeoHashGrid) aggregator.buildAggregation(0L));
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
index 7b93653fff..f54cb902d9 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java
@@ -75,21 +75,22 @@ public class TermsAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
fieldType.setName("string");
fieldType.setHasDocValues(true );
- try (TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- Terms result = (Terms) aggregator.buildAggregation(0L);
- assertEquals(4, result.getBuckets().size());
- assertEquals("a", result.getBuckets().get(0).getKeyAsString());
- assertEquals(2L, result.getBuckets().get(0).getDocCount());
- assertEquals("b", result.getBuckets().get(1).getKeyAsString());
- assertEquals(2L, result.getBuckets().get(1).getDocCount());
- assertEquals("c", result.getBuckets().get(2).getKeyAsString());
- assertEquals(1L, result.getBuckets().get(2).getDocCount());
- assertEquals("d", result.getBuckets().get(3).getKeyAsString());
- assertEquals(1L, result.getBuckets().get(3).getDocCount());
- }
+
+ TermsAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ Terms result = (Terms) aggregator.buildAggregation(0L);
+ assertEquals(4, result.getBuckets().size());
+ assertEquals("a", result.getBuckets().get(0).getKeyAsString());
+ assertEquals(2L, result.getBuckets().get(0).getDocCount());
+ assertEquals("b", result.getBuckets().get(1).getKeyAsString());
+ assertEquals(2L, result.getBuckets().get(1).getDocCount());
+ assertEquals("c", result.getBuckets().get(2).getKeyAsString());
+ assertEquals(1L, result.getBuckets().get(2).getDocCount());
+ assertEquals("d", result.getBuckets().get(3).getKeyAsString());
+ assertEquals(1L, result.getBuckets().get(3).getDocCount());
+
}
indexReader.close();
directory.close();
@@ -191,12 +192,11 @@ public class TermsAggregatorTests extends AggregatorTestCase {
private InternalAggregation buildInternalAggregation(TermsAggregationBuilder builder, MappedFieldType fieldType,
IndexSearcher searcher) throws IOException {
- try (TermsAggregator aggregator = createAggregator(builder, searcher, fieldType)) {
- aggregator.preCollection();
- searcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- return aggregator.buildAggregation(0L);
- }
+ TermsAggregator aggregator = createAggregator(builder, searcher, fieldType);
+ aggregator.preCollection();
+ searcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ return aggregator.buildAggregation(0L);
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
index b80dd163fc..90afe09529 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java
@@ -118,13 +118,13 @@ public class CardinalityAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(
NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (CardinalityAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher,
- fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalCardinality) aggregator.buildAggregation(0L));
- }
+ CardinalityAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher,
+ fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalCardinality) aggregator.buildAggregation(0L));
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java
index c53927a55b..8a1bc036fb 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java
@@ -112,12 +112,13 @@ public class MaxAggregatorTests extends AggregatorTestCase {
MaxAggregationBuilder aggregationBuilder = new MaxAggregationBuilder("_name").field("number");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalMax) aggregator.buildAggregation(0L));
- }
+
+ MaxAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalMax) aggregator.buildAggregation(0L));
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java
index 9ba7ecb71b..4e90a9083c 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregatorTests.java
@@ -113,12 +113,13 @@ public class AvgAggregatorTests extends AggregatorTestCase {
AvgAggregationBuilder aggregationBuilder = new AvgAggregationBuilder("_name").field("number");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalAvg) aggregator.buildAggregation(0L));
- }
+
+ AvgAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalAvg) aggregator.buildAggregation(0L));
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorTests.java
index 48b0b115e8..3408742160 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregatorTests.java
@@ -62,13 +62,14 @@ public class MinAggregatorTests extends AggregatorTestCase {
MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("_name").field("number");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
- assertEquals(-1.0, result.getValue(), 0);
- }
+
+ MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
+ assertEquals(-1.0, result.getValue(), 0);
+
indexReader.close();
directory.close();
}
@@ -96,13 +97,14 @@ public class MinAggregatorTests extends AggregatorTestCase {
MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("_name").field("number");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
- assertEquals(-1.0, result.getValue(), 0);
- }
+
+ MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
+ assertEquals(-1.0, result.getValue(), 0);
+
indexReader.close();
directory.close();
}
@@ -127,13 +129,14 @@ public class MinAggregatorTests extends AggregatorTestCase {
MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("_name").field("number2");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number2");
- try (MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
- assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0);
- }
+
+ MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
+ assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0);
+
indexReader.close();
directory.close();
}
@@ -149,13 +152,14 @@ public class MinAggregatorTests extends AggregatorTestCase {
MinAggregationBuilder aggregationBuilder = new MinAggregationBuilder("_name").field("number");
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(new MatchAllDocsQuery(), aggregator);
- aggregator.postCollection();
- InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
- assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0);
- }
+
+ MinAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(new MatchAllDocsQuery(), aggregator);
+ aggregator.postCollection();
+ InternalMin result = (InternalMin) aggregator.buildAggregation(0L);
+ assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0);
+
indexReader.close();
directory.close();
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java
index f264243044..8aa160c8a8 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/HDRPercentilesAggregatorTests.java
@@ -127,12 +127,12 @@ public class HDRPercentilesAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (HDRPercentilesAggregator aggregator = createAggregator(builder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalHDRPercentiles) aggregator.buildAggregation(0L));
- }
+ HDRPercentilesAggregator aggregator = createAggregator(builder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalHDRPercentiles) aggregator.buildAggregation(0L));
+
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
index 90cc2464a1..7f95b06b5a 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/TDigestPercentilesAggregatorTests.java
@@ -148,12 +148,11 @@ public class TDigestPercentilesAggregatorTests extends AggregatorTestCase {
MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
fieldType.setName("number");
- try (TDigestPercentilesAggregator aggregator = createAggregator(builder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
- verify.accept((InternalTDigestPercentiles) aggregator.buildAggregation(0L));
- }
+ TDigestPercentilesAggregator aggregator = createAggregator(builder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((InternalTDigestPercentiles) aggregator.buildAggregation(0L));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorTests.java
index 53731c5853..20f7512761 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregatorTests.java
@@ -132,13 +132,12 @@ public class SumAggregatorTests extends AggregatorTestCase {
SumAggregationBuilder aggregationBuilder = new SumAggregationBuilder("_name");
aggregationBuilder.field(FIELD_NAME);
- try (SumAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
+ SumAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
- verify.accept((Sum) aggregator.buildAggregation(0L));
- }
+ verify.accept((Sum) aggregator.buildAggregation(0L));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java
index b5aacbfcca..1da1807bfe 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregatorTests.java
@@ -120,13 +120,11 @@ public class ValueCountAggregatorTests extends AggregatorTestCase {
ValueCountAggregationBuilder aggregationBuilder = new ValueCountAggregationBuilder("_name", valueType);
aggregationBuilder.field(FIELD_NAME);
- try (ValueCountAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType)) {
- aggregator.preCollection();
- indexSearcher.search(query, aggregator);
- aggregator.postCollection();
-
- verify.accept((ValueCount) aggregator.buildAggregation(0L));
- }
+ ValueCountAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, fieldType);
+ aggregator.preCollection();
+ indexSearcher.search(query, aggregator);
+ aggregator.postCollection();
+ verify.accept((ValueCount) aggregator.buildAggregation(0L));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
index d920c6a67b..316277973f 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -1550,30 +1550,6 @@ public class SearchQueryIT extends ESIntegTestCase {
assertHitCount(searchResponse, 2);
}
- public void testMatchQueryWithStackedStems() throws IOException {
- CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder()
- .put(indexSettings())
- .put("index.analysis.analyzer.index.type", "custom")
- .put("index.analysis.analyzer.index.tokenizer", "standard")
- .put("index.analysis.analyzer.index.filter", "lowercase")
- .put("index.analysis.analyzer.search.type", "custom")
- .put("index.analysis.analyzer.search.tokenizer", "standard")
- .putArray("index.analysis.analyzer.search.filter", "lowercase", "keyword_repeat", "porter_stem", "unique_stem")
- .put("index.analysis.filter.unique_stem.type", "unique")
- .put("index.analysis.filter.unique_stem.only_on_same_position", true));
- assertAcked(builder.addMapping("test", "text", "type=text,analyzer=index,search_analyzer=search"));
-
- client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get();
- refresh();
- SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(Operator.AND)).get();
- assertHitCount(searchResponse, 1);
-
- client().prepareIndex("test", "test", "2").setSource("text", "run fox run").get();
- refresh();
- searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(Operator.AND)).get();
- assertHitCount(searchResponse, 2);
- }
-
public void testQueryStringWithSynonyms() throws IOException {
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder()
.put(indexSettings())
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
index 864f060be0..46a94e641c 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
@@ -694,107 +694,6 @@ public class SuggestSearchIT extends ESIntegTestCase {
assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
}
- public void testPhraseBoundaryCases() throws IOException, URISyntaxException {
- CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder()
- .put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) // to get reliable statistics we should put this all into one shard
- .put("index.analysis.analyzer.body.tokenizer", "standard")
- .putArray("index.analysis.analyzer.body.filter", "lowercase")
- .put("index.analysis.analyzer.bigram.tokenizer", "standard")
- .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
- .put("index.analysis.analyzer.ngram.tokenizer", "standard")
- .putArray("index.analysis.analyzer.ngram.filter", "my_shingle2", "lowercase")
- .put("index.analysis.analyzer.myDefAnalyzer.tokenizer", "standard")
- .putArray("index.analysis.analyzer.myDefAnalyzer.filter", "shingle", "lowercase")
- .put("index.analysis.filter.my_shingle.type", "shingle")
- .put("index.analysis.filter.my_shingle.output_unigrams", false)
- .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
- .put("index.analysis.filter.my_shingle.max_shingle_size", 2)
- .put("index.analysis.filter.my_shingle2.type", "shingle")
- .put("index.analysis.filter.my_shingle2.output_unigrams", true)
- .put("index.analysis.filter.my_shingle2.min_shingle_size", 2)
- .put("index.analysis.filter.my_shingle2.max_shingle_size", 2));
-
- XContentBuilder mapping = XContentFactory.jsonBuilder()
- .startObject().startObject("type1")
- .startObject("properties")
- .startObject("body").field("type", "text").field("analyzer", "body").endObject()
- .startObject("bigram").field("type", "text").field("analyzer", "bigram").endObject()
- .startObject("ngram").field("type", "text").field("analyzer", "ngram").endObject()
- .endObject()
- .endObject().endObject();
- assertAcked(builder.addMapping("type1", mapping));
- ensureGreen();
-
- String[] strings = new String[]{
- "Xorr the God-Jewel",
- "Grog the God-Crusher",
- "Xorn",
- "Walter Newell",
- "Wanda Maximoff",
- "Captain America",
- "American Ace",
- "Wundarr the Aquarian",
- "Will o' the Wisp",
- "Xemnu the Titan"
- };
- for (String line : strings) {
- index("test", "type1", line, "body", line, "bigram", line, "ngram", line);
- }
- refresh();
-
- NumShards numShards = getNumShards("test");
-
- // Lets make sure some things throw exceptions
- PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("bigram")
- .analyzer("body")
- .addCandidateGenerator(candidateGenerator("does_not_exist").minWordLength(1).suggestMode("always"))
- .realWordErrorLikelihood(0.95f)
- .maxErrors(0.5f)
- .size(1);
- phraseSuggestion.clearCandidateGenerators().analyzer(null);
- try {
- searchSuggest("xor the got-jewel", numShards.numPrimaries, Collections.singletonMap("simple_phrase", phraseSuggestion));
- fail("analyzer does only produce ngrams");
- } catch (SearchPhaseExecutionException e) {
- }
-
- phraseSuggestion.analyzer("bigram");
- try {
- searchSuggest("xor the got-jewel", numShards.numPrimaries, Collections.singletonMap("simple_phrase", phraseSuggestion));
- fail("analyzer does only produce ngrams");
- } catch (SearchPhaseExecutionException e) {
- }
-
- // Now we'll make sure some things don't
- phraseSuggestion.forceUnigrams(false);
- searchSuggest( "xor the got-jewel", 0, Collections.singletonMap("simple_phrase", phraseSuggestion));
-
- // Field doesn't produce unigrams but the analyzer does
- phraseSuggestion.forceUnigrams(true).analyzer("ngram");
- searchSuggest( "xor the got-jewel", 0, Collections.singletonMap("simple_phrase", phraseSuggestion));
-
- phraseSuggestion = phraseSuggestion("ngram")
- .analyzer("myDefAnalyzer")
- .forceUnigrams(true)
- .realWordErrorLikelihood(0.95f)
- .maxErrors(0.5f)
- .size(1)
- .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"));
- Suggest suggest = searchSuggest( "xor the got-jewel", 0, Collections.singletonMap("simple_phrase", phraseSuggestion));
-
- // "xorr the god jewel" and and "xorn the god jewel" have identical scores (we are only using unigrams to score), so we tie break by
- // earlier term (xorn):
- assertSuggestion(suggest, 0, "simple_phrase", "xorn the god jewel");
-
- phraseSuggestion.analyzer(null);
- suggest = searchSuggest( "xor the got-jewel", 0, Collections.singletonMap("simple_phrase", phraseSuggestion));
-
- // In this case xorr has a better score than xorn because we set the field back to the default (my_shingle2) analyzer, so the
- // probability that the term is not in the dictionary but is NOT a misspelling is relatively high in this case compared to the
- // others that have no n-gram with the other terms in the phrase :) you can set this realWorldErrorLikelyhood
- assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
- }
-
public void testDifferentShardSize() throws Exception {
createIndex("test");
ensureGreen();
diff --git a/core/src/test/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilderTests.java b/core/src/test/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilderTests.java
new file mode 100644
index 0000000000..836193423f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/threadpool/AutoQueueAdjustingExecutorBuilderTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.settings.Settings;
+
+import static org.hamcrest.CoreMatchers.containsString;
+
+public class AutoQueueAdjustingExecutorBuilderTests extends ESThreadPoolTestCase {
+
+ public void testValidatingMinMaxSettings() throws Exception {
+ Settings settings = Settings.builder()
+ .put("thread_pool.search.min_queue_size", randomIntBetween(30, 100))
+ .put("thread_pool.search.max_queue_size", randomIntBetween(1,25))
+ .build();
+ try {
+ new AutoQueueAdjustingExecutorBuilder(settings, "test", 1, 15, 1, 100, 10);
+ fail("should have thrown an exception");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("Failed to parse value"));
+ }
+ }
+
+}
diff --git a/core/src/test/resources/indices/bwc/index-5.2.2.zip b/core/src/test/resources/indices/bwc/index-5.2.2.zip
new file mode 100644
index 0000000000..63f7e72821
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/index-5.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/index-5.3.2.zip b/core/src/test/resources/indices/bwc/index-5.3.2.zip
new file mode 100644
index 0000000000..f0bed79b79
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/index-5.3.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-5.2.2.zip b/core/src/test/resources/indices/bwc/repo-5.2.2.zip
new file mode 100644
index 0000000000..0a9a2771e8
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/repo-5.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/indices/bwc/repo-5.3.2.zip b/core/src/test/resources/indices/bwc/repo-5.3.2.zip
new file mode 100644
index 0000000000..82c50dd6fc
--- /dev/null
+++ b/core/src/test/resources/indices/bwc/repo-5.3.2.zip
Binary files differ