author    Isabel Drost-Fromm <isabel.drostfromm@elasticsearch.com>  2016-03-29 11:04:02 +0200
committer Isabel Drost-Fromm <isabel.drostfromm@elasticsearch.com>  2016-03-29 11:04:02 +0200
commit    407e2cdcf93475e2b326228eaf169fa2572d7d8b (patch)
tree      3810be0a7ec51b990fef5b2f7a9cb1ecf3557b10
parent    5dd481bfe3ef81a59217f7809ca51dc4f31b893e (diff)
parent    c7bdfb1126d47442f7e12f996eecbb7fab315c2d (diff)

Merge branch 'master' into deprecation/sort-option-reverse-removal

Conflicts:
	core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
	core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
-rw-r--r--  CONTRIBUTING.md | 4
-rw-r--r--  README.textile | 2
-rw-r--r--  buildSrc/src/main/resources/checkstyle_suppressions.xml | 21
-rw-r--r--  core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/ActionModule.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java | 48
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java | 195
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java | 66
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java | 61
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java | 200
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java | 194
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java | 14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java | 35
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java | 14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java | 64
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java | 35
-rw-r--r--  core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java | 18
-rw-r--r--  core/src/main/java/org/elasticsearch/client/support/AbstractClient.java | 19
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterState.java | 51
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java | 161
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java | 90
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java | 2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java | 30
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java | 14
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java | 23
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java | 24
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java | 17
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java | 66
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java | 83
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java | 15
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java | 13
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java | 6
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java | 26
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java | 16
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java | 25
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java | 31
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java | 22
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java | 34
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java | 48
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java | 32
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java | 41
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java | 34
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java | 52
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java | 24
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java | 64
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java | 27
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java | 14
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java | 52
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java | 19
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java | 12
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java | 32
-rw-r--r--  core/src/main/java/org/elasticsearch/common/network/NetworkModule.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java | 3
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java | 47
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java | 44
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java | 16
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java | 59
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java | 65
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexService.java | 31
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/Engine.java | 76
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java | 6
-rwxr-xr-x  core/src/main/java/org/elasticsearch/index/mapper/MapperService.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java | 54
-rw-r--r--  core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java | 8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java | 8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java | 8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IndexShard.java | 107
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java | 250
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java | 32
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java | 7
-rw-r--r--  core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java | 89
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java | 89
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchModule.java | 41
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java | 4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java | 47
-rw-r--r--  core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java | 40
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java | 84
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java | 90
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java | 39
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java | 74
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java | 38
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/SortMode.java | 5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/SortOrder.java | 9
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/TransportService.java | 17
-rw-r--r--  core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java | 86
-rw-r--r--  core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java | 91
-rw-r--r--  core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java | 4
-rw-r--r--  core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java | 74
-rw-r--r--  core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java | 121
-rw-r--r--  core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java | 10
-rw-r--r--  core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java | 2
-rw-r--r--  core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java | 18
-rw-r--r--  core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java | 22
-rw-r--r--  core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java | 20
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java | 8
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java | 46
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java | 241
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java | 6
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java | 1
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java | 38
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java | 80
-rw-r--r--  core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java | 4
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java | 13
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java | 36
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java | 28
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java | 5
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java | 21
-rw-r--r--  core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java | 3
-rw-r--r--  core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java | 62
-rw-r--r--  core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java | 21
-rw-r--r--  core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java | 4
-rw-r--r--  core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java | 37
-rw-r--r--  core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java | 36
-rw-r--r--  core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java | 66
-rw-r--r--  core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java | 2
-rw-r--r--  core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java | 39
-rw-r--r--  core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java | 57
-rw-r--r--  core/src/test/java/org/elasticsearch/index/IndexServiceTests.java | 7
-rw-r--r--  core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java | 67
-rw-r--r--  core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java | 4
-rw-r--r--  core/src/test/java/org/elasticsearch/index/mapper/internal/TypeFieldTypeTests.java | 49
-rw-r--r--  core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java | 32
-rw-r--r--  core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java | 7
-rw-r--r--  core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java | 82
-rw-r--r--  core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java | 16
-rw-r--r--  core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java | 193
-rw-r--r--  core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java | 1
-rw-r--r--  core/src/test/java/org/elasticsearch/ingest/processor/TrackingResultProcessorTests.java | 129
-rw-r--r--  core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java | 9
-rw-r--r--  core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java | 1
-rw-r--r--  core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java | 32
-rw-r--r--  core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java | 1
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java | 14
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java | 5
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java | 15
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java | 7
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java | 15
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java | 4
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/SortOrderTests.java | 2
-rw-r--r--  core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java | 2
-rw-r--r--  core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java | 8
-rw-r--r--  core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java | 3
-rw-r--r--  distribution/src/main/packaging/scripts/postinst | 4
-rw-r--r--  docs/reference/cluster.asciidoc | 2
-rw-r--r--  docs/reference/cluster/allocation-explain.asciidoc | 159
-rw-r--r--  docs/reference/migration/migrate_5_0/java.asciidoc | 5
-rw-r--r--  docs/reference/migration/migrate_5_0/rest.asciidoc | 5
-rw-r--r--  docs/reference/migration/migrate_5_0/settings.asciidoc | 15
-rw-r--r--  docs/reference/modules/discovery/zen.asciidoc | 8
-rw-r--r--  docs/reference/modules/painless.asciidoc | 299
-rw-r--r--  docs/reference/query-dsl/template-query.asciidoc | 2
-rw-r--r--  docs/reference/search/percolate.asciidoc | 6
-rw-r--r--  modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java | 10
-rw-r--r--  modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java | 2
-rw-r--r--  modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java | 2
-rw-r--r--  modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java | 7
-rw-r--r--  modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml | 8
-rw-r--r--  modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_throttle.yaml | 2
-rw-r--r--  modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml | 10
-rw-r--r--  qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml | 52
-rw-r--r--  qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml | 2
-rw-r--r--  qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml | 2
-rw-r--r--  qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats | 2
-rw-r--r--  qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats | 72
-rw-r--r--  qa/vagrant/src/test/resources/packaging/scripts/os_package.bash | 3
-rw-r--r--  qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash | 45
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json | 1
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json | 20
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml | 76
-rw-r--r--  rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml | 3
-rw-r--r--  test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java | 2
-rw-r--r--  test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java | 4
-rw-r--r--  test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java | 2
188 files changed, 4791 insertions(+), 1819 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1b755a8869..2c69cbf3e3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -54,7 +54,7 @@ Once your changes and tests are ready to submit for review:
1. Test your changes
Run the test suite to make sure that nothing is broken. See the
-[TESTING](../TESTING.asciidoc) file for help running tests.
+[TESTING](TESTING.asciidoc) file for help running tests.
2. Sign the Contributor License Agreement
@@ -102,5 +102,3 @@ Before submitting your changes, run the test suite to make sure that nothing is
```sh
gradle check
```
-
-Source: [Contributing to elasticsearch](https://www.elastic.co/contributing-to-elasticsearch/)
diff --git a/README.textile b/README.textile
index 804f46a181..be51570e27 100644
--- a/README.textile
+++ b/README.textile
@@ -147,7 +147,7 @@ curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
}'
</pre>
-The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
+The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get their own special index.
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
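Editorial note: the README paragraph above ends by pointing at a configuration example that falls outside this hunk. For reference, a Java-client sketch of the same 1-shard / 1-replica setup (not part of this commit; it assumes the index-creation API of this era and a Client obtained elsewhere):

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

Client client = /* obtained from a node or transport client */ null;
client.admin().indices().prepareCreate("kimchy")
        .setSettings(Settings.builder()
                .put("index.number_of_shards", 1)     // instead of the default 5
                .put("index.number_of_replicas", 1))  // one replica per shard
        .get();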
diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index af83e2db0e..673f1d5dc6 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -326,17 +326,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]CancelAllocationCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]command[/\\]MoveAllocationCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AllocationDeciders.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AwarenessAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ClusterRebalanceAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ConcurrentRebalanceAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]DiskThresholdDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]EnableAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]FilterAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]NodeVersionAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]SameShardAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ShardsLimitAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]SnapshotInProgressAllocationDecider.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]ThrottlingAllocationDecider.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]service[/\\]InternalClusterService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Base64.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Booleans.java" checks="LineLength" />
@@ -349,16 +338,12 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]collect[/\\]ImmutableOpenIntMap.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]GeoDistance.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]LineStringBuilder.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]PolygonBuilder.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]ShapeBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]DefaultConstructionProxyFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]InjectorImpl.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]internal[/\\]ConstructionContext.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]multibindings[/\\]MapBinder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]spi[/\\]InjectionPoint.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]NamedWriteableRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
@@ -380,7 +365,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]IndexScopedSettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]Setting.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]Settings.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]PropertiesSettingsLoader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]XContentSettingsLoader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]TimeValue.java" checks="LineLength" />
@@ -1066,9 +1050,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]breaker[/\\]MemoryCircuitBreakerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]ShapeBuilderTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]AbstractShapeBuilderTestCase.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]EnvelopeBuilderTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]builders[/\\]PolygonBuilderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]hash[/\\]MessageDigestsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]ModuleTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]BytesStreamsTests.java" checks="LineLength" />
@@ -1078,8 +1059,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]rounding[/\\]TimeZoneRoundingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]ScopedSettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]SettingTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]JsonSettingsLoaderTests.java" checks="LineLength" />
- <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]YamlSettingsLoaderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]transport[/\\]BoundTransportAddressTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]DistanceUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]FuzzinessTests.java" checks="LineLength" />
diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
index 4e24944ffa..564f780b8e 100644
--- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
+++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
@@ -237,6 +237,10 @@ public abstract class BlendedTermQuery extends Query {
return newCtx;
}
+ public List<Term> getTerms() {
+ return Arrays.asList(terms);
+ }
+
@Override
public String toString(String field) {
StringBuilder builder = new StringBuilder("blended(terms:[");
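Editorial note: the hunk above adds a plain accessor over the query's blended terms. A hypothetical caller (not from this commit; it assumes the dismax factory method on this class) might use it like this:

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;

Term[] terms = new Term[] { new Term("title", "quick"), new Term("body", "quick") };
BlendedTermQuery query = BlendedTermQuery.dismaxBlendedQuery(terms, 0.1f);
for (Term term : query.getTerms()) {      // the accessor added in this hunk
    System.out.println(term.field() + ":" + term.text());
}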
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index 2b33a66942..a659e60f50 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -19,6 +19,8 @@
package org.elasticsearch.action;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
+import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
@@ -263,6 +265,7 @@ public class ActionModule extends AbstractModule {
registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);
+ registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java
new file mode 100644
index 0000000000..d34ac63602
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainAction.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ * Action for explaining shard allocation for a shard in the cluster
+ */
+public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainRequest,
+ ClusterAllocationExplainResponse,
+ ClusterAllocationExplainRequestBuilder> {
+
+ public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction();
+ public static final String NAME = "cluster:monitor/allocation/explain";
+
+ private ClusterAllocationExplainAction() {
+ super(NAME);
+ }
+
+ @Override
+ public ClusterAllocationExplainResponse newResponse() {
+ return new ClusterAllocationExplainResponse();
+ }
+
+ @Override
+ public ClusterAllocationExplainRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+ return new ClusterAllocationExplainRequestBuilder(client, this);
+ }
+}
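Editorial note: the action follows the usual singleton pattern, with INSTANCE and NAME identifying it on the transport layer, so any ElasticsearchClient can execute it. A minimal invocation sketch (not part of this commit; client and the index name are assumed):

ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest("my-index", 0, true); // hypothetical index
ClusterAllocationExplainResponse response =
        client.execute(ClusterAllocationExplainAction.INSTANCE, request).actionGet();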
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
new file mode 100644
index 0000000000..d14785127d
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.MasterNodeRequest;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to explain the allocation of a shard in the cluster
+ */
+public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAllocationExplainRequest> {
+
+ private String index;
+ private Integer shard;
+ private Boolean primary;
+ private boolean includeYesDecisions = false;
+
+ /** Explain the first unassigned shard */
+ public ClusterAllocationExplainRequest() {
+ this.index = null;
+ this.shard = null;
+ this.primary = null;
+ }
+
+ /**
+ * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
+ * will be picked for explanation. If no replicas are unassigned, the first assigned replica will
+ * be explained.
+ */
+ public ClusterAllocationExplainRequest(String index, int shard, boolean primary) {
+ this.index = index;
+ this.shard = shard;
+ this.primary = primary;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (this.useAnyUnassignedShard() == false) {
+ if (this.index == null) {
+ validationException = addValidationError("index must be specified", validationException);
+ }
+ if (this.shard == null) {
+ validationException = addValidationError("shard must be specified", validationException);
+ }
+ if (this.primary == null) {
+ validationException = addValidationError("primary must be specified", validationException);
+ }
+ }
+ return validationException;
+ }
+
+ /**
+ * Returns {@code true} iff the first unassigned shard is to be used
+ */
+ public boolean useAnyUnassignedShard() {
+ return this.index == null && this.shard == null && this.primary == null;
+ }
+
+ public ClusterAllocationExplainRequest setIndex(String index) {
+ this.index = index;
+ return this;
+ }
+
+ @Nullable
+ public String getIndex() {
+ return this.index;
+ }
+
+ public ClusterAllocationExplainRequest setShard(Integer shard) {
+ this.shard = shard;
+ return this;
+ }
+
+ @Nullable
+ public Integer getShard() {
+ return this.shard;
+ }
+
+ public ClusterAllocationExplainRequest setPrimary(Boolean primary) {
+ this.primary = primary;
+ return this;
+ }
+
+ @Nullable
+ public Boolean isPrimary() {
+ return this.primary;
+ }
+
+ public void includeYesDecisions(boolean includeYesDecisions) {
+ this.includeYesDecisions = includeYesDecisions;
+ }
+
+ /** Returns true if all decisions should be included. Otherwise only "NO" and "THROTTLE" decisions are returned */
+ public boolean includeYesDecisions() {
+ return this.includeYesDecisions;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");
+ if (this.useAnyUnassignedShard()) {
+ sb.append("useAnyUnassignedShard=true");
+ } else {
+ sb.append("index=").append(index);
+ sb.append(",shard=").append(shard);
+ sb.append(",primary?=").append(primary);
+ }
+ sb.append(",includeYesDecisions?=").append(includeYesDecisions);
+ return sb.toString();
+ }
+
+ public static ClusterAllocationExplainRequest parse(XContentParser parser) throws IOException {
+ String currentFieldName = null;
+ String index = null;
+ Integer shard = null;
+ Boolean primary = null;
+ XContentParser.Token token;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("index".equals(currentFieldName)) {
+ index = parser.text();
+ } else if ("shard".equals(currentFieldName)) {
+ shard = parser.intValue();
+ } else if ("primary".equals(currentFieldName)) {
+ primary = parser.booleanValue();
+ } else {
+ throw new ElasticsearchParseException("unexpected field [" + currentFieldName + "] in allocation explain request");
+ }
+
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ // the object was started
+ continue;
+ } else {
+ throw new ElasticsearchParseException("unexpected token [" + token + "] in allocation explain request");
+ }
+ }
+
+ if (index == null && shard == null && primary == null) {
+ // If it was an empty body, use the "any unassigned shard" request
+ return new ClusterAllocationExplainRequest();
+ } else if (index == null || shard == null || primary == null) {
+ throw new ElasticsearchParseException("'index', 'shard', and 'primary' must be specified in allocation explain request");
+ }
+ return new ClusterAllocationExplainRequest(index, shard, primary);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ this.index = in.readOptionalString();
+ this.shard = in.readOptionalVInt();
+ this.primary = in.readOptionalBoolean();
+ this.includeYesDecisions = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalString(index);
+ out.writeOptionalVInt(shard);
+ out.writeOptionalBoolean(primary);
+ out.writeBoolean(includeYesDecisions);
+ }
+}
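Editorial note: the parse and validate logic above admits exactly two request shapes: an empty body, meaning "explain the first unassigned shard", or a body with all three of index, shard, and primary. A sketch of both forms, using only the API defined in this file (the index name is a hypothetical placeholder):

// Empty request: picks the first unassigned shard in the cluster.
ClusterAllocationExplainRequest any = new ClusterAllocationExplainRequest();
assert any.useAnyUnassignedShard();
assert any.validate() == null;               // nothing to validate in this mode

// Fully specified request: index, shard number, and primary flag are all required.
ClusterAllocationExplainRequest specific = new ClusterAllocationExplainRequest("my-index", 0, true);
specific.includeYesDecisions(true);          // report YES decisions too, not only NO/THROTTLE
assert specific.validate() == null;          // all three fields set, so no validation error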
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java
new file mode 100644
index 0000000000..1a1950c7f1
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+
+/**
+ * Builder for requests to explain the allocation of a shard in the cluster
+ */
+public class ClusterAllocationExplainRequestBuilder
+ extends MasterNodeOperationRequestBuilder<ClusterAllocationExplainRequest,
+ ClusterAllocationExplainResponse,
+ ClusterAllocationExplainRequestBuilder> {
+
+ public ClusterAllocationExplainRequestBuilder(ElasticsearchClient client, ClusterAllocationExplainAction action) {
+ super(client, action, new ClusterAllocationExplainRequest());
+ }
+
+ /** The index name to use when finding the shard to explain */
+ public ClusterAllocationExplainRequestBuilder setIndex(String index) {
+ request.setIndex(index);
+ return this;
+ }
+
+ /** The shard number to use when finding the shard to explain */
+ public ClusterAllocationExplainRequestBuilder setShard(int shard) {
+ request.setShard(shard);
+ return this;
+ }
+
+ /** Whether the primary or replica should be explained */
+ public ClusterAllocationExplainRequestBuilder setPrimary(boolean primary) {
+ request.setPrimary(primary);
+ return this;
+ }
+
+ /**
+ * Signal that the first unassigned shard should be used
+ */
+ public ClusterAllocationExplainRequestBuilder useAnyUnassignedShard() {
+ request.setIndex(null);
+ request.setShard(null);
+ request.setPrimary(null);
+ return this;
+ }
+
+}
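Editorial note: the builder gives a fluent path to the same request. A usage sketch under the same assumptions (hypothetical index name, client obtained elsewhere):

ClusterAllocationExplainResponse response =
        new ClusterAllocationExplainRequestBuilder(client, ClusterAllocationExplainAction.INSTANCE)
                .setIndex("my-index")
                .setShard(0)
                .setPrimary(true)
                .get();                      // blocks until the master answers
ClusterAllocationExplanation explanation = response.getExplanation();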
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java
new file mode 100644
index 0000000000..cc586bd1a5
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Explanation response for a shard in the cluster
+ */
+public class ClusterAllocationExplainResponse extends ActionResponse {
+
+ private ClusterAllocationExplanation cae;
+
+ public ClusterAllocationExplainResponse() {
+ }
+
+ public ClusterAllocationExplainResponse(ClusterAllocationExplanation cae) {
+ this.cae = cae;
+ }
+
+ /**
+ * Return the explanation for shard allocation in the cluster
+ */
+ public ClusterAllocationExplanation getExplanation() {
+ return this.cae;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ this.cae = new ClusterAllocationExplanation(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ cae.writeTo(out);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
new file mode 100644
index 0000000000..6b4173734b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContent.Params;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@code ClusterAllocationExplanation} is an explanation of why a shard may or may not be allocated to nodes. It also includes weights
+ * for where the shard is likely to be assigned. It is an immutable class
+ */
+public final class ClusterAllocationExplanation implements ToXContent, Writeable<ClusterAllocationExplanation> {
+
+ private final ShardId shard;
+ private final boolean primary;
+ private final String assignedNodeId;
+ private final Map<DiscoveryNode, Decision> nodeToDecision;
+ private final Map<DiscoveryNode, Float> nodeWeights;
+ private final UnassignedInfo unassignedInfo;
+
+ public ClusterAllocationExplanation(StreamInput in) throws IOException {
+ this.shard = ShardId.readShardId(in);
+ this.primary = in.readBoolean();
+ this.assignedNodeId = in.readOptionalString();
+ this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+
+ Map<DiscoveryNode, Decision> ntd = null;
+ int size = in.readVInt();
+ ntd = new HashMap<>(size);
+ for (int i = 0; i < size; i++) {
+ DiscoveryNode dn = DiscoveryNode.readNode(in);
+ Decision decision = Decision.readFrom(in);
+ ntd.put(dn, decision);
+ }
+ this.nodeToDecision = ntd;
+
+ Map<DiscoveryNode, Float> ntw = null;
+ size = in.readVInt();
+ ntw = new HashMap<>(size);
+ for (int i = 0; i < size; i++) {
+ DiscoveryNode dn = DiscoveryNode.readNode(in);
+ float weight = in.readFloat();
+ ntw.put(dn, weight);
+ }
+ this.nodeWeights = ntw;
+ }
+
+ public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId,
+ UnassignedInfo unassignedInfo, Map<DiscoveryNode, Decision> nodeToDecision,
+ Map<DiscoveryNode, Float> nodeWeights) {
+ this.shard = shard;
+ this.primary = primary;
+ this.assignedNodeId = assignedNodeId;
+ this.unassignedInfo = unassignedInfo;
+ this.nodeToDecision = nodeToDecision == null ? Collections.emptyMap() : nodeToDecision;
+ this.nodeWeights = nodeWeights == null ? Collections.emptyMap() : nodeWeights;
+ }
+
+ public ShardId getShard() {
+ return this.shard;
+ }
+
+ public boolean isPrimary() {
+ return this.primary;
+ }
+
+ /** Returns true if the shard is assigned to a node */
+ public boolean isAssigned() {
+ return this.assignedNodeId != null;
+ }
+
+ /** Return the assigned node id or null if not assigned */
+ @Nullable
+ public String getAssignedNodeId() {
+ return this.assignedNodeId;
+ }
+
+ /** Return the unassigned info for the shard or null if the shard is assigned */
+ @Nullable
+ public UnassignedInfo getUnassignedInfo() {
+ return this.unassignedInfo;
+ }
+
+ /** Return a map of node to decision for shard allocation */
+ public Map<DiscoveryNode, Decision> getNodeDecisions() {
+ return this.nodeToDecision;
+ }
+
+ /**
+ * Return a map of node to balancer "weight" for allocation. Higher weights mean the balancer wants to allocate the shard to that node
+ * more
+ */
+ public Map<DiscoveryNode, Float> getNodeWeights() {
+ return this.nodeWeights;
+ }
+
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(); {
+ builder.startObject("shard"); {
+ builder.field("index", shard.getIndexName());
+ builder.field("index_uuid", shard.getIndex().getUUID());
+ builder.field("id", shard.getId());
+ builder.field("primary", primary);
+ }
+ builder.endObject(); // end shard
+ builder.field("assigned", this.assignedNodeId != null);
+ // If assigned, show the node id of the node it's assigned to
+ if (assignedNodeId != null) {
+ builder.field("assigned_node_id", this.assignedNodeId);
+ }
+ // If we have unassigned info, show that
+ if (unassignedInfo != null) {
+ unassignedInfo.toXContent(builder, params);
+ }
+ builder.startObject("nodes");
+ for (Map.Entry<DiscoveryNode, Float> entry : nodeWeights.entrySet()) {
+ DiscoveryNode node = entry.getKey();
+ builder.startObject(node.getId()); {
+ builder.field("node_name", node.getName());
+ builder.startObject("node_attributes"); {
+ for (ObjectObjectCursor<String, String> attrKV : node.attributes()) {
+ builder.field(attrKV.key, attrKV.value);
+ }
+ }
+ builder.endObject(); // end attributes
+ Decision d = nodeToDecision.get(node);
+ if (node.getId().equals(assignedNodeId)) {
+ builder.field("final_decision", "CURRENTLY_ASSIGNED");
+ } else {
+ builder.field("final_decision", d.type().toString());
+ }
+ builder.field("weight", entry.getValue());
+ d.toXContent(builder, params);
+ }
+ builder.endObject(); // end node <uuid>
+ }
+ builder.endObject(); // end nodes
+ }
+ builder.endObject(); // end wrapping object
+ return builder;
+ }
+
+ @Override
+ public ClusterAllocationExplanation readFrom(StreamInput in) throws IOException {
+ return new ClusterAllocationExplanation(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ this.getShard().writeTo(out);
+ out.writeBoolean(this.isPrimary());
+ out.writeOptionalString(this.getAssignedNodeId());
+ out.writeOptionalWriteable(this.getUnassignedInfo());
+
+ Map<DiscoveryNode, Decision> ntd = this.getNodeDecisions();
+ out.writeVInt(ntd.size());
+ for (Map.Entry<DiscoveryNode, Decision> entry : ntd.entrySet()) {
+ entry.getKey().writeTo(out);
+ Decision.writeTo(entry.getValue(), out);
+ }
+ Map<DiscoveryNode, Float> ntw = this.getNodeWeights();
+ out.writeVInt(ntw.size());
+ for (Map.Entry<DiscoveryNode, Float> entry : ntw.entrySet()) {
+ entry.getKey().writeTo(out);
+ out.writeFloat(entry.getValue());
+ }
+ }
+}
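Editorial note: because the class implements ToXContent, an explanation renders straight to the JSON shape produced by toXContent above. A serialization sketch (assuming the XContentBuilder API of this era, where string() returns the rendered JSON; explanation is assumed given):

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
explanation.toXContent(builder, ToXContent.EMPTY_PARAMS);
String json = builder.string();   // {"shard":{...},"assigned":...,"nodes":{...}}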
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
new file mode 100644
index 0000000000..b9b31634bb
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaData.Custom;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
+ * master node in the cluster.
+ */
+public class TransportClusterAllocationExplainAction
+ extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {
+
+ private final AllocationService allocationService;
+ private final ClusterInfoService clusterInfoService;
+ private final AllocationDeciders allocationDeciders;
+ private final ShardsAllocator shardAllocator;
+
+ @Inject
+ public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
+ ThreadPool threadPool, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver,
+ AllocationService allocationService, ClusterInfoService clusterInfoService,
+ AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator) {
+ super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
+ indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
+ this.allocationService = allocationService;
+ this.clusterInfoService = clusterInfoService;
+ this.allocationDeciders = allocationDeciders;
+ this.shardAllocator = shardAllocator;
+ }
+
+ @Override
+ protected String executor() {
+ return ThreadPool.Names.MANAGEMENT;
+ }
+
+ @Override
+ protected ClusterBlockException checkBlock(ClusterAllocationExplainRequest request, ClusterState state) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+ }
+
+ @Override
+ protected ClusterAllocationExplainResponse newResponse() {
+ return new ClusterAllocationExplainResponse();
+ }
+
+ /**
+ * Return the decisions for the given {@code ShardRouting} on the given {@code RoutingNode}. If {@code includeYesDecisions} is false,
+ * only the non-YES (NO and THROTTLE) decisions are returned.
+ */
+ public static Decision tryShardOnNode(ShardRouting shard, RoutingNode node, RoutingAllocation allocation, boolean includeYesDecisions) {
+ Decision d = allocation.deciders().canAllocate(shard, node, allocation);
+ if (includeYesDecisions) {
+ return d;
+ } else {
+ Decision.Multi nonYesDecisions = new Decision.Multi();
+ List<Decision> decisions = d.getDecisions();
+ for (Decision decision : decisions) {
+ if (decision.type() != Decision.Type.YES) {
+ nonYesDecisions.add(decision);
+ }
+ }
+ return nonYesDecisions;
+ }
+ }
+
+ /**
+ * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
+ * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
+ */
+ public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
+ boolean includeYesDecisions, ShardsAllocator shardAllocator) {
+ // don't short circuit deciders, we want a full explanation
+ allocation.debugDecision(true);
+ // get the existing unassigned info if available
+ UnassignedInfo ui = shard.unassignedInfo();
+
+ RoutingNodesIterator iter = routingNodes.nodes();
+ Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
+ while (iter.hasNext()) {
+ RoutingNode node = iter.next();
+ DiscoveryNode discoNode = node.node();
+ if (discoNode.isDataNode()) {
+ Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
+ nodeToDecision.put(discoNode, d);
+ }
+ }
+ return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), shard.currentNodeId(), ui, nodeToDecision,
+ shardAllocator.weighShard(allocation, shard));
+ }
+
+ @Override
+ protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
+ final ActionListener<ClusterAllocationExplainResponse> listener) {
+ final RoutingNodes routingNodes = state.getRoutingNodes();
+ final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
+ clusterInfoService.getClusterInfo(), System.nanoTime());
+
+ ShardRouting shardRouting = null;
+ if (request.useAnyUnassignedShard()) {
+ // If we can use any shard, just pick the first unassigned one (if there are any)
+ RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
+ if (ui.hasNext()) {
+ shardRouting = ui.next();
+ }
+ } else {
+ String index = request.getIndex();
+ int shard = request.getShard();
+ if (request.isPrimary()) {
+ // If we're looking for the primary shard, there's only one copy, so pick it directly
+ shardRouting = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
+ } else {
+ // If looking for a replica, go through all the replica shards
+ List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
+ if (replicaShardRoutings.size() > 0) {
+ // Fall back to the first replica
+ shardRouting = replicaShardRoutings.get(0);
+ // If some replicas are assigned and others are not,
+ // prefer an unassigned one
+ for (ShardRouting replica : replicaShardRoutings) {
+ if (replica.unassigned()) {
+ shardRouting = replica;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (shardRouting == null) {
+ listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
+ return;
+ }
+ logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
+
+ ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
+ request.includeYesDecisions(), shardAllocator);
+ listener.onResponse(new ClusterAllocationExplainResponse(cae));
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
index 1d62fc06f0..442b5edde7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
@@ -235,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
scriptStats = in.readOptionalStreamable(ScriptStats::new);
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
- ingestStats = in.readOptionalWritable(IngestStats::new);
+ ingestStats = in.readOptionalWriteable(IngestStats::new);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
index 5604616ed3..6020aa1a10 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java
@@ -302,7 +302,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
int availableProcessors;
int allocatedProcessors;
- long availableMemory;
final ObjectIntHashMap<String> names;
public OsStats() {
@@ -326,15 +325,10 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
return allocatedProcessors;
}
- public ByteSizeValue getAvailableMemory() {
- return new ByteSizeValue(availableMemory);
- }
-
@Override
public void readFrom(StreamInput in) throws IOException {
availableProcessors = in.readVInt();
allocatedProcessors = in.readVInt();
- availableMemory = in.readLong();
int size = in.readVInt();
names.clear();
for (int i = 0; i < size; i++) {
@@ -346,7 +340,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(availableProcessors);
out.writeVInt(allocatedProcessors);
- out.writeLong(availableMemory);
out.writeVInt(names.size());
for (ObjectIntCursor<String> name : names) {
out.writeString(name.key);
@@ -365,9 +358,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
static final XContentBuilderString ALLOCATED_PROCESSORS = new XContentBuilderString("allocated_processors");
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString NAMES = new XContentBuilderString("names");
- static final XContentBuilderString MEM = new XContentBuilderString("mem");
- static final XContentBuilderString TOTAL = new XContentBuilderString("total");
- static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
}
@@ -375,10 +365,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors);
- builder.startObject(Fields.MEM);
- builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory);
- builder.endObject();
-
builder.startArray(Fields.NAMES);
for (ObjectIntCursor<String> name : names) {
builder.startObject();
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java
index 7b389dba25..c638a429b1 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java
@@ -283,7 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
source = in.readString();
updateAllTypes = in.readBoolean();
readTimeout(in);
- concreteIndex = in.readOptionalWritable(Index::new);
+ concreteIndex = in.readOptionalWriteable(Index::new);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java b/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
index 30efbe1b0f..bc40a8368f 100644
--- a/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
+++ b/core/src/main/java/org/elasticsearch/action/ingest/SimulateExecutionService.java
@@ -23,13 +23,14 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
-import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.List;
+import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate;
+
class SimulateExecutionService {
private static final String THREAD_POOL_NAME = ThreadPool.Names.MANAGEMENT;
@@ -40,40 +41,16 @@ class SimulateExecutionService {
this.threadPool = threadPool;
}
- void executeVerboseDocument(Processor processor, IngestDocument ingestDocument, List<SimulateProcessorResult> processorResultList) throws Exception {
- if (processor instanceof CompoundProcessor) {
- CompoundProcessor cp = (CompoundProcessor) processor;
- try {
- for (Processor p : cp.getProcessors()) {
- executeVerboseDocument(p, ingestDocument, processorResultList);
- }
- } catch (Exception e) {
- for (Processor p : cp.getOnFailureProcessors()) {
- executeVerboseDocument(p, ingestDocument, processorResultList);
- }
- }
- } else {
- try {
- processor.execute(ingestDocument);
- processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument)));
- } catch (Exception e) {
- processorResultList.add(new SimulateProcessorResult(processor.getTag(), e));
- throw e;
- }
- }
- }
-
SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
if (verbose) {
List<SimulateProcessorResult> processorResultList = new ArrayList<>();
- IngestDocument currentIngestDocument = new IngestDocument(ingestDocument);
- CompoundProcessor pipelineProcessor = new CompoundProcessor(pipeline.getProcessors(), pipeline.getOnFailureProcessors());
+ CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
try {
- executeVerboseDocument(pipelineProcessor, currentIngestDocument, processorResultList);
+ verbosePipelineProcessor.execute(ingestDocument);
+ return new SimulateDocumentVerboseResult(processorResultList);
} catch (Exception e) {
- return new SimulateDocumentBaseResult(e);
+ return new SimulateDocumentVerboseResult(processorResultList);
}
- return new SimulateDocumentVerboseResult(processorResultList);
} else {
try {
pipeline.execute(ingestDocument);
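
The rewritten verbose path above delegates to TrackingResultProcessor.decorate(...), whose body is not part of this hunk. A minimal sketch of the decorator pattern it relies on, with all names invented for illustration (this is not the ES implementation):

    import java.util.List;

    interface Proc {
        void execute(StringBuilder doc) throws Exception;
    }

    // Wraps a processor so that executing it records one result per run.
    final class TrackingProc implements Proc {
        private final Proc inner;
        private final List<String> results;

        TrackingProc(Proc inner, List<String> results) {
            this.inner = inner;
            this.results = results;
        }

        @Override
        public void execute(StringBuilder doc) throws Exception {
            try {
                inner.execute(doc);
                results.add("ok: " + doc);    // snapshot after success
            } catch (Exception e) {
                results.add("failed: " + e);  // record, then rethrow
                throw e;
            }
        }
    }

Because each result is recorded before the exception propagates, the catch branch in executeDocument can still return a SimulateDocumentVerboseResult built from the partial processorResultList, which is exactly what the new code does.
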
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
index 6283e69a02..b4cfbb6ad8 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java
@@ -51,6 +51,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
*/
protected ShardId shardId;
+ long primaryTerm;
+
protected TimeValue timeout = DEFAULT_TIMEOUT;
protected String index;
@@ -148,6 +150,16 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return routedBasedOnClusterVersion;
}
+ /** returns the primary term active at the time the operation was performed on the primary shard */
+ public long primaryTerm() {
+ return primaryTerm;
+ }
+
+ /** sets the primary term in which the operation was performed */
+ public void primaryTerm(long term) {
+ primaryTerm = term;
+ }
+
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
@@ -169,6 +181,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
timeout = TimeValue.readTimeValue(in);
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
+ primaryTerm = in.readVLong();
}
@Override
@@ -184,6 +197,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
timeout.writeTo(out);
out.writeString(index);
out.writeVLong(routedBasedOnClusterVersion);
+ out.writeVLong(primaryTerm);
}
@Override
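
The new primaryTerm now travels with every replicated request (note the matched writeVLong/readVLong pair above). A toy sketch, with invented names, of the kind of replica-side staleness check this enables; it is an illustration, not the IndexShard implementation:

    // Rejects operations stamped with a primary term older than the newest
    // term this replica has observed.
    final class ReplicaTermGuard {
        private long highestSeenTerm;

        synchronized void onOperation(long requestPrimaryTerm) {
            if (requestPrimaryTerm < highestSeenTerm) {
                // a newer primary was elected after this request was routed
                throw new IllegalStateException("stale primary term ["
                        + requestPrimaryTerm + "] < [" + highestSeenTerm + "]");
            }
            highestSeenTerm = requestPrimaryTerm;
        }
    }
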
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java
index da3fce74fa..9fe3da59a1 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationTask.java
@@ -59,7 +59,7 @@ public class ReplicationTask extends Task {
}
public static class Status implements Task.Status {
- public static final Status PROTOTYPE = new Status("prototype");
+ public static final String NAME = "replication";
private final String phase;
@@ -73,7 +73,7 @@ public class ReplicationTask extends Task {
@Override
public String getWriteableName() {
- return "replication";
+ return NAME;
}
@Override
@@ -88,10 +88,5 @@ public class ReplicationTask extends Task {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
}
-
- @Override
- public Status readFrom(StreamInput in) throws IOException {
- return new Status(in);
- }
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 1ddddbf888..d70e271fa2 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -52,7 +52,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
@@ -359,32 +358,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
});
} else {
- try {
- failReplicaIfNeeded(t);
- } catch (Throwable unexpected) {
- logger.error("{} unexpected error while failing replica", unexpected, request.shardId().id());
- } finally {
responseWithFailure(t);
- }
- }
- }
-
- private void failReplicaIfNeeded(Throwable t) {
- Index index = request.shardId().getIndex();
- int shardId = request.shardId().id();
- logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
- if (ignoreReplicaException(t) == false) {
- IndexService indexService = indicesService.indexService(index);
- if (indexService == null) {
- logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
- return;
- }
- IndexShard indexShard = indexService.getShardOrNull(shardId);
- if (indexShard == null) {
- logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
- return;
- }
- indexShard.failShard(actionName + " failed on replica", t);
}
}
@@ -401,7 +375,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected void doRun() throws Exception {
setPhase(task, "replica");
assert request.shardId() != null : "request shardId must be set";
- try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
+ try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId(), request.primaryTerm())) {
shardOperationOnReplica(request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
@@ -707,7 +681,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
if (indexShardReference.isRelocated() == false) {
executeLocally();
-
} else {
executeRemotely();
}
@@ -716,6 +689,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private void executeLocally() throws Exception {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
+ primaryResponse.v2().primaryTerm(indexShardReference.opPrimaryTerm());
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
@@ -825,17 +799,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
- return new IndexShardReferenceImpl(indexShard, true);
+ return IndexShardReferenceImpl.createOnPrimary(indexShard);
}
/**
* returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
* replication is completed on the node.
*/
- protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
+ protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId, long primaryTerm) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
- return new IndexShardReferenceImpl(indexShard, false);
+ return IndexShardReferenceImpl.createOnReplica(indexShard, primaryTerm);
}
/**
@@ -1098,9 +1072,13 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
totalShards,
success.get(),
failuresArray
-
)
);
+ if (logger.isTraceEnabled()) {
+ logger.trace("finished replicating action [{}], request [{}], shardInfo [{}]", actionName, replicaRequest,
+ finalResponse.getShardInfo());
+ }
+
try {
channel.sendResponse(finalResponse);
} catch (IOException responseException) {
@@ -1125,6 +1103,9 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
boolean isRelocated();
void failShard(String reason, @Nullable Throwable e);
ShardRouting routingEntry();
+
+ /** returns the primary term of the current operation */
+ long opPrimaryTerm();
}
static final class IndexShardReferenceImpl implements IndexShardReference {
@@ -1132,15 +1113,23 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
private final IndexShard indexShard;
private final Releasable operationLock;
- IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
+ private IndexShardReferenceImpl(IndexShard indexShard, long primaryTerm) {
this.indexShard = indexShard;
- if (primaryAction) {
+ if (primaryTerm < 0) {
operationLock = indexShard.acquirePrimaryOperationLock();
} else {
- operationLock = indexShard.acquireReplicaOperationLock();
+ operationLock = indexShard.acquireReplicaOperationLock(primaryTerm);
}
}
+ static IndexShardReferenceImpl createOnPrimary(IndexShard indexShard) {
+ return new IndexShardReferenceImpl(indexShard, -1);
+ }
+
+ static IndexShardReferenceImpl createOnReplica(IndexShard indexShard, long primaryTerm) {
+ return new IndexShardReferenceImpl(indexShard, primaryTerm);
+ }
+
@Override
public void close() {
operationLock.close();
@@ -1160,6 +1149,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public ShardRouting routingEntry() {
return indexShard.routingEntry();
}
+
+ @Override
+ public long opPrimaryTerm() {
+ return indexShard.getPrimaryTerm();
+ }
}
protected final void processAfterWrite(boolean refresh, IndexShard indexShard, Translog.Location location) {
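
Design note on the factory methods above: the private constructor overloads a negative term as a sentinel meaning "acquire the primary operation lock", and createOnPrimary/createOnReplica keep that encoding out of caller code. Usage as it appears in this patch:

    // primary side: no term needed, the sentinel is supplied internally
    IndexShardReference primaryRef = IndexShardReferenceImpl.createOnPrimary(indexShard);

    // replica side: lock acquisition is validated against the primary term
    // carried by the request
    IndexShardReference replicaRef =
            IndexShardReferenceImpl.createOnReplica(indexShard, request.primaryTerm());
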
diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
index 97678e6c06..ad7702466c 100644
--- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
@@ -207,8 +207,8 @@ public abstract class TransportTasksAction<
this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
ImmutableOpenMap<String, DiscoveryNode> nodes = clusterState.nodes().nodes();
this.nodes = new DiscoveryNode[nodesIds.length];
- for (int i = 0; i < nodesIds.length; i++) {
- this.nodes[i] = nodes.get(nodesIds[i]);
+ for (int i = 0; i < this.nodesIds.length; i++) {
+ this.nodes[i] = nodes.get(this.nodesIds[i]);
}
this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
index 576ce720f3..dcb88dca84 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.transport.TransportSettings;
@@ -39,7 +40,6 @@ import java.util.Set;
/**
* We enforce limits once any network host is configured. In this case we assume the node is running in production
* and all production limit checks must pass. This should be extended as we go to settings like:
- * - discovery.zen.minimum_master_nodes
* - discovery.zen.ping.unicast.hosts is set if we use zen disco
* - ensure we can write in all data directories
* - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
@@ -114,10 +114,10 @@ final class BootstrapCheck {
}
// the list of checks to execute
- private static List<Check> checks(final Settings settings) {
+ static List<Check> checks(final Settings settings) {
final List<Check> checks = new ArrayList<>();
final FileDescriptorCheck fileDescriptorCheck
- = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
+ = Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
if (Constants.LINUX) {
@@ -126,6 +126,7 @@ final class BootstrapCheck {
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
+ checks.add(new MinMasterNodesCheck(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(settings)));
return Collections.unmodifiableList(checks);
}
@@ -186,10 +187,10 @@ final class BootstrapCheck {
@Override
public final String errorMessage() {
return String.format(
- Locale.ROOT,
- "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
- getMaxFileDescriptorCount(),
- limit
+ Locale.ROOT,
+ "max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
+ getMaxFileDescriptorCount(),
+ limit
);
}
@@ -226,6 +227,26 @@ final class BootstrapCheck {
}
+ static class MinMasterNodesCheck implements Check {
+
+ final boolean minMasterNodesIsSet;
+
+ MinMasterNodesCheck(boolean minMasterNodesIsSet) {
+ this.minMasterNodesIsSet = minMasterNodesIsSet;
+ }
+
+ @Override
+ public boolean check() {
+ return minMasterNodesIsSet == false;
+ }
+
+ @Override
+ public String errorMessage() {
+ return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
+ "] to a majority of the number of master eligible nodes in your cluster.";
+ }
+ }
+
static class MaxNumberOfThreadsCheck implements Check {
private final long maxNumberOfThreadsThreshold = 1 << 11;
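
The new MinMasterNodesCheck only verifies that the setting is present once the node binds to a network host; choosing the value is left to the operator. The usual majority rule the error message refers to, as a sketch:

    // For N master-eligible nodes a majority is floor(N / 2) + 1,
    // e.g. 2 of 3, or 3 of 5.
    static int minimumMasterNodes(int masterEligibleNodes) {
        return masterEligibleNodes / 2 + 1;
    }
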
diff --git a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
index d7c76906f9..ecfe307e6c 100644
--- a/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
+++ b/core/src/main/java/org/elasticsearch/client/ClusterAdminClient.java
@@ -21,6 +21,9 @@ package org.elasticsearch.client;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
@@ -572,4 +575,19 @@ public interface ClusterAdminClient extends ElasticsearchClient {
* Simulates an ingest pipeline
*/
SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source);
+
+ /**
+ * Explain the allocation of a shard
+ */
+ void allocationExplain(ClusterAllocationExplainRequest request, ActionListener<ClusterAllocationExplainResponse> listener);
+
+ /**
+ * Explain the allocation of a shard
+ */
+ ActionFuture<ClusterAllocationExplainResponse> allocationExplain(ClusterAllocationExplainRequest request);
+
+ /**
+ * Explain the allocation of a shard
+ */
+ ClusterAllocationExplainRequestBuilder prepareAllocationExplain();
}
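
A hedged usage sketch of the new client methods; the setIndex/setShard/setPrimary setters are assumed to exist on ClusterAllocationExplainRequestBuilder (they mirror the request fields consumed by the transport action earlier in this commit):

    // Explain the allocation of the primary of shard 0 of "my-index".
    ClusterAllocationExplainResponse response = client.admin().cluster()
            .prepareAllocationExplain()
            .setIndex("my-index")
            .setShard(0)
            .setPrimary(true)
            .get();
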
diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index 0044890ee3..cb1252dc46 100644
--- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -25,6 +25,10 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
@@ -1245,6 +1249,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
public SimulatePipelineRequestBuilder prepareSimulatePipeline(BytesReference source) {
return new SimulatePipelineRequestBuilder(this, SimulatePipelineAction.INSTANCE, source);
}
+
+ @Override
+ public void allocationExplain(ClusterAllocationExplainRequest request, ActionListener<ClusterAllocationExplainResponse> listener) {
+ execute(ClusterAllocationExplainAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public ActionFuture<ClusterAllocationExplainResponse> allocationExplain(ClusterAllocationExplainRequest request) {
+ return execute(ClusterAllocationExplainAction.INSTANCE, request);
+ }
+
+ @Override
+ public ClusterAllocationExplainRequestBuilder prepareAllocationExplain() {
+ return new ClusterAllocationExplainRequestBuilder(this, ClusterAllocationExplainAction.INSTANCE);
+ }
}
static class IndicesAdmin implements IndicesAdminClient {
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
index e6cc335a47..1ac379555a 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -63,7 +63,7 @@ import java.util.Set;
/**
* Represents the current state of the cluster.
- *
+ * <p>
* The cluster state object is immutable with an
* exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable},
* and cluster state {@link #status}, which is updated during cluster state publishing and applying
@@ -74,7 +74,7 @@ import java.util.Set;
* the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
* method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
* publishing mechanism can be overridden by other discovery.
- *
+ * <p>
* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state
* differences instead of the entire state on each change. The publishing mechanism should only send differences
* to a node if this node was present in the previous version of the cluster state. If a node is not present was
@@ -135,7 +135,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
public static <T extends Custom> T lookupPrototypeSafe(String type) {
@SuppressWarnings("unchecked")
- T proto = (T)customPrototypes.get(type);
+ T proto = (T) customPrototypes.get(type);
if (proto == null) {
throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
}
@@ -281,6 +281,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
sb.append("state uuid: ").append(stateUUID).append("\n");
sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
sb.append("meta data version: ").append(metaData.version()).append("\n");
+ for (IndexMetaData indexMetaData : metaData) {
+ final String TAB = " ";
+ sb.append(TAB).append(indexMetaData.getIndex());
+ sb.append(": v[").append(indexMetaData.getVersion()).append("]\n");
+ for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
+ sb.append(TAB).append(TAB).append(shard).append(": ");
+ sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], ");
+ sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n");
+ }
+ }
sb.append(blocks().prettyPrint());
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
@@ -477,6 +487,12 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endArray();
+ builder.startObject(IndexMetaData.KEY_PRIMARY_TERMS);
+ for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
+ builder.field(Integer.toString(shard), indexMetaData.primaryTerm(shard));
+ }
+ builder.endObject();
+
builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));
@@ -487,6 +503,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endObject();
+ // index metadata
builder.endObject();
}
builder.endObject();
@@ -683,16 +700,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
/**
- * @param data input bytes
- * @param localNode used to set the local node in the cluster state.
+ * @param data input bytes
+ * @param localNode used to set the local node in the cluster state.
*/
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(StreamInput.wrap(data), localNode);
}
/**
- * @param in input stream
- * @param localNode used to set the local node in the cluster state. can be null.
+ * @param in input stream
+ * @param localNode used to set the local node in the cluster state. can be null.
*/
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
return PROTO.readFrom(in, localNode);
@@ -791,17 +808,17 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
metaData = proto.metaData.readDiffFrom(in);
blocks = proto.blocks.readDiffFrom(in);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
- new DiffableUtils.DiffableValueSerializer<String, Custom>() {
- @Override
- public Custom read(StreamInput in, String key) throws IOException {
- return lookupPrototypeSafe(key).readFrom(in);
- }
+ new DiffableUtils.DiffableValueSerializer<String, Custom>() {
+ @Override
+ public Custom read(StreamInput in, String key) throws IOException {
+ return lookupPrototypeSafe(key).readFrom(in);
+ }
- @Override
- public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
- return lookupPrototypeSafe(key).readDiffFrom(in);
- }
- });
+ @Override
+ public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
+ return lookupPrototypeSafe(key).readDiffFrom(in);
+ }
+ });
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index 20ba36dd91..ca3c153e1d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -19,6 +19,7 @@
package org.elasticsearch.cluster.metadata;
+import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
@@ -29,6 +30,8 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@@ -56,6 +59,7 @@ import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.text.ParseException;
+import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
@@ -217,6 +221,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
.numberOfShards(1).numberOfReplicas(0).build();
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
+ static final String KEY_VERSION = "version";
+ static final String KEY_SETTINGS = "settings";
+ static final String KEY_STATE = "state";
+ static final String KEY_MAPPINGS = "mappings";
+ static final String KEY_ALIASES = "aliases";
+ public static final String KEY_PRIMARY_TERMS = "primary_terms";
+
public static final String INDEX_STATE_FILE_PREFIX = "state-";
private final int numberOfShards;
@@ -224,6 +235,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Index index;
private final long version;
+ private final long[] primaryTerms;
private final State state;
@@ -247,7 +259,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Version indexUpgradedVersion;
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
- private IndexMetaData(Index index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
+ private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
@@ -255,6 +267,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.index = index;
this.version = version;
+ this.primaryTerms = primaryTerms;
+ assert primaryTerms.length == numberOfShards;
this.state = state;
this.numberOfShards = numberOfShards;
this.numberOfReplicas = numberOfReplicas;
@@ -296,6 +310,16 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this.version;
}
+
+ /**
+ * The term of the currently selected primary. This is a non-negative number incremented when
+ * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary.
+ * See {@link AllocationService#updateMetaDataWithRoutingTable(MetaData, RoutingTable, RoutingTable)}.
+ **/
+ public long primaryTerm(int shardId) {
+ return this.primaryTerms[shardId];
+ }
+
/**
* Return the {@link Version} on which this index has been created. This
* information is typically useful for backward compatibility.
@@ -416,6 +440,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
IndexMetaData that = (IndexMetaData) o;
+ if (version != that.version) {
+ return false;
+ }
+
if (!aliases.equals(that.aliases)) {
return false;
}
@@ -434,6 +462,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (!customs.equals(that.customs)) {
return false;
}
+
+ if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
+ return false;
+ }
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
return false;
}
@@ -443,14 +475,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
@Override
public int hashCode() {
int result = index.hashCode();
+ result = 31 * result + Long.hashCode(version);
result = 31 * result + state.hashCode();
result = 31 * result + aliases.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode();
+ result = 31 * result + customs.hashCode();
+ result = 31 * result + Arrays.hashCode(primaryTerms);
result = 31 * result + activeAllocationIds.hashCode();
return result;
}
+
@Override
public Diff<IndexMetaData> diff(IndexMetaData previousState) {
return new IndexMetaDataDiff(previousState, this);
@@ -476,6 +512,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final String index;
private final long version;
+ private final long[] primaryTerms;
private final State state;
private final Settings settings;
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
@@ -488,11 +525,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
version = after.version;
state = after.state;
settings = after.settings;
+ primaryTerms = after.primaryTerms;
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
- DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
+ DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
}
public IndexMetaDataDiff(StreamInput in) throws IOException {
@@ -500,22 +538,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
version = in.readLong();
state = State.fromId(in.readByte());
settings = Settings.readSettingsFromStream(in);
+ primaryTerms = in.readVLongArray();
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
- new DiffableUtils.DiffableValueSerializer<String, Custom>() {
- @Override
- public Custom read(StreamInput in, String key) throws IOException {
- return lookupPrototypeSafe(key).readFrom(in);
- }
+ new DiffableUtils.DiffableValueSerializer<String, Custom>() {
+ @Override
+ public Custom read(StreamInput in, String key) throws IOException {
+ return lookupPrototypeSafe(key).readFrom(in);
+ }
- @Override
- public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
- return lookupPrototypeSafe(key).readDiffFrom(in);
- }
- });
+ @Override
+ public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
+ return lookupPrototypeSafe(key).readDiffFrom(in);
+ }
+ });
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
- DiffableUtils.StringSetValueSerializer.getInstance());
+ DiffableUtils.StringSetValueSerializer.getInstance());
}
@Override
@@ -524,6 +563,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeLong(version);
out.writeByte(state.id);
Settings.writeSettingsToStream(settings, out);
+ out.writeVLongArray(primaryTerms);
mappings.writeTo(out);
aliases.writeTo(out);
customs.writeTo(out);
@@ -536,6 +576,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.version(version);
builder.state(state);
builder.settings(settings);
+ builder.primaryTerms(primaryTerms);
builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs));
@@ -550,6 +591,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.version(in.readLong());
builder.state(State.fromId(in.readByte()));
builder.settings(readSettingsFromStream(in));
+ builder.primaryTerms(in.readVLongArray());
int mappingsSize = in.readVInt();
for (int i = 0; i < mappingsSize; i++) {
MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in);
@@ -581,6 +623,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeLong(version);
out.writeByte(state.id());
writeSettingsToStream(settings, out);
+ out.writeVLongArray(primaryTerms);
out.writeVInt(mappings.size());
for (ObjectCursor<MappingMetaData> cursor : mappings.values()) {
cursor.value.writeTo(out);
@@ -614,6 +657,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private String index;
private State state = State.OPEN;
private long version = 1;
+ private long[] primaryTerms = null;
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
@@ -633,6 +677,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.state = indexMetaData.state;
this.version = indexMetaData.version;
this.settings = indexMetaData.getSettings();
+ this.primaryTerms = indexMetaData.primaryTerms.clone();
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
@@ -672,8 +717,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
public Builder settings(Settings.Builder settings) {
- this.settings = settings.build();
- return this;
+ return settings(settings.build());
}
public Builder settings(Settings settings) {
@@ -741,6 +785,42 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}
+ /**
+ * returns the primary term for the given shard.
+ * See {@link IndexMetaData#primaryTerm(int)} for more information.
+ */
+ public long primaryTerm(int shardId) {
+ if (primaryTerms == null) {
+ initializePrimaryTerms();
+ }
+ return this.primaryTerms[shardId];
+ }
+
+ /**
+ * sets the primary term for the given shard.
+ * See {@link IndexMetaData#primaryTerm(int)} for more information.
+ */
+ public Builder primaryTerm(int shardId, long primaryTerm) {
+ if (primaryTerms == null) {
+ initializePrimaryTerms();
+ }
+ this.primaryTerms[shardId] = primaryTerm;
+ return this;
+ }
+
+ private void primaryTerms(long[] primaryTerms) {
+ this.primaryTerms = primaryTerms.clone();
+ }
+
+ private void initializePrimaryTerms() {
+ assert primaryTerms == null;
+ if (numberOfShards() < 0) {
+ throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
+ }
+ primaryTerms = new long[numberOfShards()];
+ }
+
public IndexMetaData build() {
ImmutableOpenMap.Builder<String, AliasMetaData> tmpAliases = aliases;
Settings tmpSettings = settings;
@@ -815,27 +895,34 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
minimumCompatibleLuceneVersion = null;
}
+ if (primaryTerms == null) {
+ initializePrimaryTerms();
+ } else if (primaryTerms.length != numberOfShards) {
+ throw new IllegalStateException("primaryTerms length is [" + primaryTerms.length
+ + "] but should be equal to number of shards [" + numberOfShards() + "]");
+ }
+
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
- return new IndexMetaData(new Index(index, uuid), version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
- tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
- indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
+ return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
+ tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
+ indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
}
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexMetaData.getIndex().getName(), XContentBuilder.FieldCaseConversion.NONE);
- builder.field("version", indexMetaData.getVersion());
- builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
+ builder.field(KEY_VERSION, indexMetaData.getVersion());
+ builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
boolean binary = params.paramAsBoolean("binary", false);
- builder.startObject("settings");
+ builder.startObject(KEY_SETTINGS);
for (Map.Entry<String, String> entry : indexMetaData.getSettings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();
- builder.startArray("mappings");
+ builder.startArray(KEY_MAPPINGS);
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
if (binary) {
builder.value(cursor.value.source().compressed());
@@ -855,12 +942,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.endObject();
}
- builder.startObject("aliases");
+ builder.startObject(KEY_ALIASES);
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();
+ builder.startArray(KEY_PRIMARY_TERMS);
+ for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
+ builder.value(indexMetaData.primaryTerm(i));
+ }
+ builder.endArray();
+
builder.startObject(KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
builder.startArray(String.valueOf(cursor.key));
@@ -895,9 +988,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
- if ("settings".equals(currentFieldName)) {
+ if (KEY_SETTINGS.equals(currentFieldName)) {
builder.settings(Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
- } else if ("mappings".equals(currentFieldName)) {
+ } else if (KEY_MAPPINGS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@@ -909,7 +1002,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
throw new IllegalArgumentException("Unexpected token: " + token);
}
}
- } else if ("aliases".equals(currentFieldName)) {
+ } else if (KEY_ALIASES.equals(currentFieldName)) {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
@@ -949,7 +1042,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
} else if (token == XContentParser.Token.START_ARRAY) {
- if ("mappings".equals(currentFieldName)) {
+ if (KEY_MAPPINGS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
builder.putMapping(new MappingMetaData(new CompressedXContent(parser.binaryValue())));
@@ -961,13 +1054,23 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
}
}
+ } else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) {
+ LongArrayList list = new LongArrayList();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.VALUE_NUMBER) {
+ list.add(parser.longValue());
+ } else {
+ throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]");
+ }
+ }
+ builder.primaryTerms(list.toArray());
} else {
throw new IllegalArgumentException("Unexpected field for an array " + currentFieldName);
}
} else if (token.isValue()) {
- if ("state".equals(currentFieldName)) {
+ if (KEY_STATE.equals(currentFieldName)) {
builder.state(State.fromString(parser.text()));
- } else if ("version".equals(currentFieldName)) {
+ } else if (KEY_VERSION.equals(currentFieldName)) {
builder.version(parser.longValue());
} else {
throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index a34405c09e..c27e0a9beb 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -586,10 +586,6 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
if (indicesRouting == null) {
throw new IllegalStateException("once build is called the builder cannot be reused");
}
- // normalize the versions right before we build it...
- for (ObjectCursor<IndexRoutingTable> indexRoutingTable : indicesRouting.values()) {
- indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value);
- }
RoutingTable table = new RoutingTable(version, indicesRouting.build());
indicesRouting = null;
return table;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
index be7d90a1fe..b92fecf0f7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java
@@ -139,7 +139,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
}
- UnassignedInfo(StreamInput in) throws IOException {
+ public UnassignedInfo(StreamInput in) throws IOException {
this.reason = Reason.values()[(int) in.readByte()];
this.unassignedTimeMillis = in.readLong();
// As System.nanoTime() cannot be compared across different JVMs, reset it to now.
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 54f9b6855a..da0fea69c6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -42,6 +42,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.index.shard.ShardId;
import java.util.ArrayList;
import java.util.Collections;
@@ -98,7 +99,7 @@ public class AllocationService extends AbstractComponent {
if (withReroute) {
reroute(allocation);
}
- final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
+ final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
logClusterHealthStateChange(
@@ -107,37 +108,44 @@ public class AllocationService extends AbstractComponent {
"shards started [" + startedShardsAsString + "] ..."
);
return result;
- }
+ }
- protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
- return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
+ protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) {
+ return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations());
}
- protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
- final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
- MetaData newMetaData = updateMetaDataWithRoutingTable(metaData,routingTable);
- return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
+
+ protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes,
+ RoutingExplanations explanations) {
+ final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build();
+ MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable);
+ return new RoutingAllocation.Result(true, newRoutingTable.validateRaiseException(newMetaData), newMetaData, explanations);
}
/**
- * Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
+ * Updates the current {@link MetaData} based on the newly created {@link RoutingTable}. Specifically
+ * we update {@link IndexMetaData#getActiveAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
+ * the changes made during this allocation.
*
- * @param currentMetaData {@link MetaData} object from before the routing table was changed.
+ * @param oldMetaData {@link MetaData} object from before the routing table was changed.
+ * @param oldRoutingTable {@link RoutingTable} from before the change.
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
*/
- static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
- // make sure index meta data and routing tables are in sync w.r.t active allocation ids
+ static MetaData updateMetaDataWithRoutingTable(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingTable newRoutingTable) {
MetaData.Builder metaDataBuilder = null;
- for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
- final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
- if (indexMetaData == null) {
- throw new IllegalStateException("no metadata found for index " + indexRoutingTable.getIndex().getName());
+ for (IndexRoutingTable newIndexTable : newRoutingTable) {
+ final IndexMetaData oldIndexMetaData = oldMetaData.index(newIndexTable.getIndex());
+ if (oldIndexMetaData == null) {
+ throw new IllegalStateException("no metadata found for index " + newIndexTable.getIndex().getName());
}
IndexMetaData.Builder indexMetaDataBuilder = null;
- for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
- Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
+ for (IndexShardRoutingTable newShardTable : newIndexTable) {
+ final ShardId shardId = newShardTable.shardId();
+
+ // update activeAllocationIds
+ Set<String> activeAllocationIds = newShardTable.activeShards().stream()
.map(ShardRouting::allocationId)
.filter(Objects::nonNull)
.map(AllocationId::getId)
@@ -145,19 +153,44 @@ public class AllocationService extends AbstractComponent {
// only update active allocation ids if there is an active shard
if (activeAllocationIds.isEmpty() == false) {
// get currently stored allocation ids
- Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
+ Set<String> storedAllocationIds = oldIndexMetaData.activeAllocationIds(shardId.id());
if (activeAllocationIds.equals(storedAllocationIds) == false) {
if (indexMetaDataBuilder == null) {
- indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
+ indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
}
+ indexMetaDataBuilder.putActiveAllocationIds(shardId.id(), activeAllocationIds);
+ }
+ }
- indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
+ // update primary terms
+ final ShardRouting newPrimary = newShardTable.primaryShard();
+ if (newPrimary == null) {
+ throw new IllegalStateException("missing primary shard for " + newShardTable.shardId());
+ }
+ final ShardRouting oldPrimary = oldRoutingTable.shardRoutingTable(shardId).primaryShard();
+ if (oldPrimary == null) {
+                    throw new IllegalStateException("missing primary shard (in the previous routing table) for " + newShardTable.shardId());
+ }
+ // we update the primary term on initial assignment or when a replica is promoted. Most notably we do *not*
+                // update it when a primary relocates
+ if (newPrimary.unassigned() ||
+ newPrimary.isSameAllocation(oldPrimary) ||
+ // we do not use newPrimary.isTargetRelocationOf(oldPrimary) because that one enforces newPrimary to
+                    // be initializing. However, when the target shard is activated, we still want the primary term to stay
+ // the same
+ (oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()))) {
+ // do nothing
+ } else {
+                    // increment the primary term
+ if (indexMetaDataBuilder == null) {
+ indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
}
+ indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
}
}
if (indexMetaDataBuilder != null) {
if (metaDataBuilder == null) {
- metaDataBuilder = MetaData.builder(currentMetaData);
+ metaDataBuilder = MetaData.builder(oldMetaData);
}
metaDataBuilder.put(indexMetaDataBuilder);
}
@@ -165,7 +198,7 @@ public class AllocationService extends AbstractComponent {
if (metaDataBuilder != null) {
return metaDataBuilder.build();
} else {
- return currentMetaData;
+ return oldMetaData;
}
}
@@ -196,7 +229,7 @@ public class AllocationService extends AbstractComponent {
}
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
- final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
+ final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
@@ -243,7 +276,7 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
        // so there will always be shard "movements" and no need to check on reroute
reroute(allocation);
- RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
+ RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
@@ -252,6 +285,7 @@ public class AllocationService extends AbstractComponent {
return result;
}
+
/**
* Reroutes the routing table based on the live nodes.
* <p>
@@ -275,7 +309,7 @@ public class AllocationService extends AbstractComponent {
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
- RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
+ RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
@@ -412,8 +446,8 @@ public class AllocationService extends AbstractComponent {
boolean changed = false;
for (ShardRouting routing : replicas) {
changed |= applyFailedShard(allocation, routing, false,
- new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
- null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
+ new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
+ null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
}
return changed;
}
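
The primary-term rule added above (keep the term while the primary keeps its allocation, including an activated relocation target; bump it on fresh assignment or replica promotion) condenses to a small predicate. A minimal sketch under stated assumptions: the Primary class below is a hypothetical stand-in for the real ShardRouting, not part of the codebase.

    import java.util.Objects;

    // Sketch only: "Primary" is a hypothetical stand-in for ShardRouting.
    final class PrimaryTermRule {
        static final class Primary {
            final String allocationId;      // null when unassigned
            final String relocationTarget;  // target allocation id while relocating, else null
            Primary(String allocationId, String relocationTarget) {
                this.allocationId = allocationId;
                this.relocationTarget = relocationTarget;
            }
            boolean unassigned() { return allocationId == null; }
            boolean relocating() { return relocationTarget != null; }
        }

        static long nextPrimaryTerm(long oldTerm, Primary oldPrimary, Primary newPrimary) {
            boolean sameTerm = newPrimary.unassigned()                  // still nothing assigned
                || Objects.equals(newPrimary.allocationId, oldPrimary.allocationId)
                // the activated relocation target keeps the term of its source
                || (oldPrimary.relocating()
                    && Objects.equals(newPrimary.allocationId, oldPrimary.relocationTarget));
            return sameTerm ? oldTerm : oldTerm + 1;  // bump only on assignment or promotion
        }
    }

For example, nextPrimaryTerm(3, new Primary("a", null), new Primary("b", null)) yields 4, modelling a replica promotion; a relocation from "a" to "b" (relocationTarget "b") keeps the term at 3.
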
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index 4e6ba0fb5a..536806c083 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -44,7 +44,7 @@ import static java.util.Collections.unmodifiableSet;
public class RoutingAllocation {
/**
- * this class is used to describe results of a {@link RoutingAllocation}
+ * this class is used to describe results of a {@link RoutingAllocation}
*/
public static class Result {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 8102f20679..97a07169d2 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -23,6 +23,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -101,6 +102,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
@Override
+ public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
+ final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
+ return balancer.weighShard(shard);
+ }
+
+ @Override
public boolean allocate(RoutingAllocation allocation) {
if (allocation.routingNodes().size() == 0) {
/* with no nodes this is pointless */
@@ -298,6 +305,29 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return balanceByWeights();
}
+ public Map<DiscoveryNode, Float> weighShard(ShardRouting shard) {
+ final NodeSorter sorter = newNodeSorter();
+ final ModelNode[] modelNodes = sorter.modelNodes;
+ final float[] weights = sorter.weights;
+
+ buildWeightOrderedIndices(sorter);
+ Map<DiscoveryNode, Float> nodes = new HashMap<>(modelNodes.length);
+ float currentNodeWeight = 0.0f;
+ for (int i = 0; i < modelNodes.length; i++) {
+ if (modelNodes[i].getNodeId().equals(shard.currentNodeId())) {
+ // If a node was found with the shard, use that weight instead of 0.0
+ currentNodeWeight = weights[i];
+ break;
+ }
+ }
+
+ for (int i = 0; i < modelNodes.length; i++) {
+ final float delta = currentNodeWeight - weights[i];
+ nodes.put(modelNodes[i].getRoutingNode().node(), delta);
+ }
+ return nodes;
+ }
+
/**
* Balances the nodes on the cluster model according to the weight
* function. The configured threshold is the minimum delta between the
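
Each entry returned by the weighShard method above is the current node's weight minus that node's weight, so larger values appear to correspond to nodes the weight function scores as lighter, and hence more attractive targets. A hedged illustration of consuming that map; the generic helper below is illustrative and not part of the API:

    import java.util.Comparator;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Illustrative helper: order nodes from most to least attractive for the shard,
    // given the Map<DiscoveryNode, Float> produced by weighShard.
    final class WeightRanking {
        static <N> List<N> rankByWeight(Map<N, Float> weights) {
            return weights.entrySet().stream()
                    .sorted(Map.Entry.<N, Float>comparingByValue(Comparator.reverseOrder()))
                    .map(Map.Entry::getKey)
                    .collect(Collectors.toList());
        }
    }
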
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
index 0bf07e8cba..aa59e7788f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ShardsAllocator.java
@@ -19,8 +19,11 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import java.util.Map;
/**
* <p>
* A {@link ShardsAllocator} is the main entry point for shard allocation on nodes in the cluster.
@@ -40,4 +43,15 @@ public interface ShardsAllocator {
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean allocate(RoutingAllocation allocation);
+
+ /**
+     * Returns a map from each node to a float "weight" indicating how strongly the allocator would like to place the shard on that node.
+ * Higher weights signify greater desire to place the shard on that node.
+ * Does not modify the allocation at all.
+ *
+ * @param allocation current node allocation
+ * @param shard shard to weigh
+ * @return map of nodes to float weights
+ */
+ Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard);
}
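
Every ShardsAllocator implementation now has to supply weighShard as well as allocate. A minimal conforming stub, assuming RoutingNodes is iterable over RoutingNode as elsewhere in this codebase; the uniform 0.0f weight is a placeholder, not real balancing logic:

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.cluster.routing.RoutingNode;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

    // A do-nothing allocator showing the shape of the widened interface.
    public class NoopShardsAllocator implements ShardsAllocator {
        @Override
        public boolean allocate(RoutingAllocation allocation) {
            return false; // never changes the routing table
        }

        @Override
        public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
            Map<DiscoveryNode, Float> weights = new HashMap<>();
            for (RoutingNode node : allocation.routingNodes()) {
                weights.put(node.node(), 0.0f); // placeholder: every node equally attractive
            }
            return weights;
        }
    }
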
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
index 227ec27746..baa0a3b1c0 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -110,7 +110,8 @@ public class AwarenessAllocationDecider extends AllocationDecider {
this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes);
setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings));
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
+ this::setForcedAwarenessAttributes);
}
private void setForcedAwarenessAttributes(Settings forceSettings) {
@@ -150,7 +151,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) {
if (awarenessAttributes.length == 0) {
- return allocation.decision(Decision.YES, NAME, "no allocation awareness enabled");
+ return allocation.decision(Decision.YES, NAME, "allocation awareness is not enabled");
}
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
@@ -158,7 +159,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
for (String awarenessAttribute : awarenessAttributes) {
// the node the shard exists on must be associated with an awareness attribute
if (!node.node().attributes().containsKey(awarenessAttribute)) {
- return allocation.decision(Decision.NO, NAME, "node does not contain awareness attribute: [%s]", awarenessAttribute);
+ return allocation.decision(Decision.NO, NAME, "node does not contain the awareness attribute: [%s]", awarenessAttribute);
}
// build attr_value -> nodes map
@@ -180,7 +181,8 @@ public class AwarenessAllocationDecider extends AllocationDecider {
String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
if (!node.nodeId().equals(nodeId)) {
// we work on different nodes, move counts around
- shardPerAttribute.putOrAdd(allocation.routingNodes().node(nodeId).node().attributes().get(awarenessAttribute), 0, -1);
+ shardPerAttribute.putOrAdd(allocation.routingNodes().node(nodeId).node().attributes().get(awarenessAttribute),
+ 0, -1);
shardPerAttribute.addTo(node.node().attributes().get(awarenessAttribute), 1);
}
} else {
@@ -215,8 +217,15 @@ public class AwarenessAllocationDecider extends AllocationDecider {
                // if we are above the required count plus leftover, then we know we are over capacity, even accounting for the mod remainder
if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) {
return allocation.decision(Decision.NO, NAME,
- "too many shards on node for attribute: [%s], required per attribute: [%d], node count: [%d], leftover: [%d]",
- awarenessAttribute, requiredCountPerAttribute, currentNodeCount, leftoverPerAttribute);
+ "there are too many shards on the node for attribute [%s], there are [%d] total shards for the index " +
+                        "and [%d] total attribute values, expected the node count [%d] to be less than or equal to the required " +
+ "number of shards per attribute [%d] plus leftover [%d]",
+ awarenessAttribute,
+ shardCount,
+ numberOfAttributes,
+ currentNodeCount,
+ requiredCountPerAttribute,
+ leftoverPerAttribute);
}
// all is well, we are below or same as average
if (currentNodeCount <= requiredCountPerAttribute) {
@@ -224,6 +233,6 @@ public class AwarenessAllocationDecider extends AllocationDecider {
}
}
- return allocation.decision(Decision.YES, NAME, "node meets awareness requirements");
+ return allocation.decision(Decision.YES, NAME, "node meets all awareness attribute requirements");
}
}
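
The capacity check above is plain integer arithmetic: each attribute value may hold shardCount / numberOfAttributes copies plus, at most, the division leftover (assumed here to be the modulo remainder). A detached worked example of the math, using the same variable names as the decider:

    // Worked example of the awareness capacity math (detached from the decider).
    public class AwarenessMath {
        public static void main(String[] args) {
            int shardCount = 5;             // total copies of the shard (primary + replicas)
            int numberOfAttributes = 2;     // e.g. two distinct "zone" values
            int requiredCountPerAttribute = shardCount / numberOfAttributes; // 2
            int leftoverPerAttribute = shardCount % numberOfAttributes;      // 1 (assumed modulo)
            int currentNodeCount = 4;       // copies that would sit in this zone
            // NO once a zone would exceed its fair share plus the leftover: 4 > 2 + 1
            System.out.println(currentNodeCount > requiredCountPerAttribute + leftoverPerAttribute);
        }
    }
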
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
index 84e974aceb..740c99016d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -78,7 +78,8 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
} else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) {
return ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
- throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
+ throw new IllegalArgumentException("Illegal value for " +
+ CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
}
}
@@ -90,10 +91,13 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
try {
type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings);
} catch (IllegalStateException e) {
- logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
+ logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'",
+ CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
+ CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
- logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), type.toString().toLowerCase(Locale.ROOT));
+ logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
+ type.toString().toLowerCase(Locale.ROOT));
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
}
@@ -112,11 +116,13 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
if (type == ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE) {
// check if there are unassigned primaries.
if ( allocation.routingNodes().hasUnassignedPrimaries() ) {
- return allocation.decision(Decision.NO, NAME, "cluster has unassigned primary shards");
+ return allocation.decision(Decision.NO, NAME,
+ "the cluster has unassigned primary shards and rebalance type is set to [%s]", type);
}
// check if there are initializing primaries that don't have a relocatingNodeId entry.
if ( allocation.routingNodes().hasInactivePrimaries() ) {
- return allocation.decision(Decision.NO, NAME, "cluster has inactive primary shards");
+ return allocation.decision(Decision.NO, NAME,
+ "the cluster has inactive primary shards and rebalance type is set to [%s]", type);
}
return allocation.decision(Decision.YES, NAME, "all primary shards are active");
@@ -124,15 +130,17 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
if (type == ClusterRebalanceType.INDICES_ALL_ACTIVE) {
// check if there are unassigned shards.
if (allocation.routingNodes().hasUnassignedShards() ) {
- return allocation.decision(Decision.NO, NAME, "cluster has unassigned shards");
+ return allocation.decision(Decision.NO, NAME,
+ "the cluster has unassigned shards and rebalance type is set to [%s]", type);
}
// in case all indices are assigned, are there initializing shards which
// are not relocating?
if ( allocation.routingNodes().hasInactiveShards() ) {
- return allocation.decision(Decision.NO, NAME, "cluster has inactive shards");
+ return allocation.decision(Decision.NO, NAME,
+ "the cluster has inactive shards and rebalance type is set to [%s]", type);
}
}
// type == Type.ALWAYS
- return allocation.decision(Decision.YES, NAME, "all shards are active");
+ return allocation.decision(Decision.YES, NAME, "all shards are active, rebalance type is [%s]", type);
}
}
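
The rebalance type these messages now echo comes from the cluster.routing.allocation.allow_rebalance setting. A hedged example of supplying it programmatically, assuming the post-2.x Settings.builder() name used on this branch:

    import org.elasticsearch.common.settings.Settings;

    class RebalanceSettingExample {
        // Only allow rebalancing once all shards, primaries and replicas, are active.
        static final Settings SETTINGS = Settings.builder()
                .put("cluster.routing.allocation.allow_rebalance", "indices_all_active")
                .build();
    }
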
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
index fe6bf918dc..2c46f7bd54 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -53,7 +53,8 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
super(settings);
this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
+ this::setClusterConcurrentRebalance);
}
private void setClusterConcurrentRebalance(int concurrentRebalance) {
@@ -63,12 +64,16 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
@Override
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
if (clusterConcurrentRebalance == -1) {
- return allocation.decision(Decision.YES, NAME, "all concurrent rebalances are allowed");
+ return allocation.decision(Decision.YES, NAME, "unlimited concurrent rebalances are allowed");
}
- if (allocation.routingNodes().getRelocatingShardCount() >= clusterConcurrentRebalance) {
- return allocation.decision(Decision.NO, NAME, "too many concurrent rebalances [%d], limit: [%d]",
- allocation.routingNodes().getRelocatingShardCount(), clusterConcurrentRebalance);
+ int relocatingShards = allocation.routingNodes().getRelocatingShardCount();
+ if (relocatingShards >= clusterConcurrentRebalance) {
+ return allocation.decision(Decision.NO, NAME,
+ "too many shards are concurrently rebalancing [%d], limit: [%d]",
+ relocatingShards, clusterConcurrentRebalance);
}
- return allocation.decision(Decision.YES, NAME, "below threshold [%d] for concurrent rebalances", clusterConcurrentRebalance);
+ return allocation.decision(Decision.YES, NAME,
+ "below threshold [%d] for concurrent rebalances, current rebalance shard count [%d]",
+ clusterConcurrentRebalance, relocatingShards);
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
index 02fc2fef94..ebf9230290 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java
@@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Locale;
@@ -146,6 +147,11 @@ public abstract class Decision implements ToXContent {
public abstract String label();
/**
+     * Returns the list of all decisions that make up this decision
+ */
+ public abstract List<Decision> getDecisions();
+
+ /**
* Simple class representing a single decision
*/
public static class Single extends Decision {
@@ -191,6 +197,11 @@ public abstract class Decision implements ToXContent {
return this.label;
}
+ @Override
+ public List<Decision> getDecisions() {
+ return Collections.singletonList(this);
+ }
+
/**
* Returns the explanation string, fully formatted. Only formats the string once
*/
@@ -202,11 +213,35 @@ public abstract class Decision implements ToXContent {
}
@Override
+ public boolean equals(Object object) {
+ if (this == object) {
+ return true;
+ }
+
+ if (object == null || getClass() != object.getClass()) {
+ return false;
+ }
+
+ Decision.Single s = (Decision.Single) object;
+ return this.type == s.type &&
+ this.label.equals(s.label) &&
+ this.getExplanation().equals(s.getExplanation());
+ }
+
+ @Override
+ public int hashCode() {
+ int result = this.type.hashCode();
+ result = 31 * result + this.label.hashCode();
+ result = 31 * result + this.getExplanation().hashCode();
+ return result;
+ }
+
+ @Override
public String toString() {
- if (explanation == null) {
- return type + "()";
+ if (explanationString != null || explanation != null) {
+ return type + "(" + getExplanation() + ")";
}
- return type + "(" + getExplanation() + ")";
+ return type + "()";
}
@Override
@@ -259,6 +294,31 @@ public abstract class Decision implements ToXContent {
}
@Override
+ public List<Decision> getDecisions() {
+ return Collections.unmodifiableList(this.decisions);
+ }
+
+ @Override
+ public boolean equals(final Object object) {
+ if (this == object) {
+ return true;
+ }
+
+ if (object == null || getClass() != object.getClass()) {
+ return false;
+ }
+
+ final Decision.Multi m = (Decision.Multi) object;
+
+ return this.decisions.equals(m.decisions);
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 * decisions.hashCode();
+ }
+
+ @Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (Decision decision : decisions) {
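
With getDecisions(), equals() and hashCode() in place, a Multi decision can be unpacked and compared structurally. A small sketch using the class's existing factory method Decision.single(type, label, explanation, params...); its exact signature is assumed from this branch:

    import org.elasticsearch.cluster.routing.allocation.decider.Decision;

    class DecisionExample {
        public static void main(String[] args) {
            Decision.Multi multi = new Decision.Multi();
            multi.add(Decision.single(Decision.Type.YES, "same_shard", "no other copy on this node"));
            multi.add(Decision.single(Decision.Type.NO, "disk_threshold", "above the high watermark"));
            // Walk the individual decisions via the new accessor.
            for (Decision part : multi.getDecisions()) {
                System.out.println(part.type() + " " + part.label());
            }
        }
    }
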
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index e2124558f2..890bbd3c31 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -164,7 +164,8 @@ public class DiskThresholdDecider extends AllocationDecider {
reroute = true;
explanation = "high disk watermark exceeded on one or more nodes";
} else {
- logger.debug("high disk watermark exceeded on {} but an automatic reroute has occurred in the last [{}], skipping reroute",
+ logger.debug("high disk watermark exceeded on {} but an automatic reroute has occurred " +
+ "in the last [{}], skipping reroute",
node, DiskThresholdDecider.this.rerouteInterval);
}
nodeHasPassedWatermark.add(node);
@@ -183,7 +184,8 @@ public class DiskThresholdDecider extends AllocationDecider {
explanation = "one or more nodes has gone under the high or low watermark";
nodeHasPassedWatermark.remove(node);
} else {
- logger.debug("{} has gone below a disk threshold, but an automatic reroute has occurred in the last [{}], skipping reroute",
+ logger.debug("{} has gone below a disk threshold, but an automatic reroute has occurred " +
+ "in the last [{}], skipping reroute",
node, DiskThresholdDecider.this.rerouteInterval);
}
}
@@ -238,13 +240,15 @@ public class DiskThresholdDecider extends AllocationDecider {
private void setLowWatermark(String lowWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
- this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
+ this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark,
+ CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
}
private void setHighWatermark(String highWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
- this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
+ this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark,
+ CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
}
// For Testing
@@ -299,7 +303,8 @@ public class DiskThresholdDecider extends AllocationDecider {
* If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
* of all shards
*/
- public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, boolean subtractShardsMovingAway, String dataPath) {
+ public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo,
+ boolean subtractShardsMovingAway, String dataPath) {
long totalSize = 0;
for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) {
String actualPath = clusterInfo.getDataPath(routing);
@@ -353,7 +358,8 @@ public class DiskThresholdDecider extends AllocationDecider {
logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
freeBytesThresholdLow, freeBytes, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "less than required [%s] free on node, free: [%s]",
+ return allocation.decision(Decision.NO, NAME,
+ "the node is above the low watermark and has less than required [%s] free, free: [%s]",
freeBytesThresholdLow, new ByteSizeValue(freeBytes));
} else if (freeBytes > freeBytesThresholdHigh.bytes()) {
// Allow the shard to be allocated because it is primary that
@@ -363,7 +369,8 @@ public class DiskThresholdDecider extends AllocationDecider {
"but allowing allocation because primary has never been allocated",
freeBytesThresholdLow, freeBytes, node.nodeId());
}
- return allocation.decision(Decision.YES, NAME, "primary has never been allocated before");
+ return allocation.decision(Decision.YES, NAME,
+ "the node is above the low watermark, but this primary shard has never been allocated before");
} else {
// Even though the primary has never been allocated, the node is
// above the high watermark, so don't allow allocating the shard
@@ -372,7 +379,9 @@ public class DiskThresholdDecider extends AllocationDecider {
"preventing allocation even though primary has never been allocated",
freeBytesThresholdHigh, freeBytes, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "less than required [%s] free on node, free: [%s]",
+ return allocation.decision(Decision.NO, NAME,
+ "the node is above the high watermark even though this shard has never been allocated " +
+ "and has less than required [%s] free on node, free: [%s]",
freeBytesThresholdHigh, new ByteSizeValue(freeBytes));
}
}
@@ -386,7 +395,8 @@ public class DiskThresholdDecider extends AllocationDecider {
Strings.format1Decimals(usedDiskThresholdLow, "%"),
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "more than allowed [%s%%] used disk on node, free: [%s%%]",
+ return allocation.decision(Decision.NO, NAME,
+ "the node is above the low watermark and has more than allowed [%s%%] used disk, free: [%s%%]",
usedDiskThresholdLow, freeDiskPercentage);
} else if (freeDiskPercentage > freeDiskThresholdHigh) {
// Allow the shard to be allocated because it is primary that
@@ -397,7 +407,8 @@ public class DiskThresholdDecider extends AllocationDecider {
Strings.format1Decimals(usedDiskThresholdLow, "%"),
Strings.format1Decimals(usedDiskPercentage, "%"), node.nodeId());
}
- return allocation.decision(Decision.YES, NAME, "primary has never been allocated before");
+ return allocation.decision(Decision.YES, NAME,
+ "the node is above the low watermark, but this primary shard has never been allocated before");
} else {
// Even though the primary has never been allocated, the node is
// above the high watermark, so don't allow allocating the shard
@@ -407,7 +418,9 @@ public class DiskThresholdDecider extends AllocationDecider {
Strings.format1Decimals(freeDiskThresholdHigh, "%"),
Strings.format1Decimals(freeDiskPercentage, "%"), node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "more than allowed [%s%%] used disk on node, free: [%s%%]",
+ return allocation.decision(Decision.NO, NAME,
+ "the node is above the high watermark even though this shard has never been allocated " +
+ "and has more than allowed [%s%%] used disk, free: [%s%%]",
usedDiskThresholdHigh, freeDiskPercentage);
}
}
@@ -417,19 +430,29 @@ public class DiskThresholdDecider extends AllocationDecider {
double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
long freeBytesAfterShard = freeBytes - shardSize;
if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
- logger.warn("after allocating, node [{}] would have less than the required {} free bytes threshold ({} bytes free), preventing allocation",
+ logger.warn("after allocating, node [{}] would have less than the required " +
+ "{} free bytes threshold ({} bytes free), preventing allocation",
node.nodeId(), freeBytesThresholdHigh, freeBytesAfterShard);
- return allocation.decision(Decision.NO, NAME, "after allocation less than required [%s] free on node, free: [%s]",
+ return allocation.decision(Decision.NO, NAME,
+ "after allocating the shard to this node, it would be above the high watermark " +
+ "and have less than required [%s] free, free: [%s]",
                freeBytesThresholdHigh, new ByteSizeValue(freeBytesAfterShard));
}
if (freeSpaceAfterShard < freeDiskThresholdHigh) {
- logger.warn("after allocating, node [{}] would have more than the allowed {} free disk threshold ({} free), preventing allocation",
+            logger.warn("after allocating, node [{}] would have less than the required " +
+ "{} free disk threshold ({} free), preventing allocation",
node.nodeId(), Strings.format1Decimals(freeDiskThresholdHigh, "%"), Strings.format1Decimals(freeSpaceAfterShard, "%"));
- return allocation.decision(Decision.NO, NAME, "after allocation more than allowed [%s%%] used disk on node, free: [%s%%]",
+ return allocation.decision(Decision.NO, NAME,
+ "after allocating the shard to this node, it would be above the high watermark " +
+ "and have more than allowed [%s%%] used disk, free: [%s%%]",
                usedDiskThresholdHigh, freeSpaceAfterShard);
}
- return allocation.decision(Decision.YES, NAME, "enough disk for shard on node, free: [%s]", new ByteSizeValue(freeBytes));
+ return allocation.decision(Decision.YES, NAME,
+ "enough disk for shard on node, free: [%s], shard size: [%s], free after allocating shard: [%s]",
+ new ByteSizeValue(freeBytes),
+ new ByteSizeValue(shardSize),
+ new ByteSizeValue(freeBytesAfterShard));
}
@Override
@@ -453,14 +476,17 @@ public class DiskThresholdDecider extends AllocationDecider {
logger.trace("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
}
if (dataPath == null || usage.getPath().equals(dataPath) == false) {
- return allocation.decision(Decision.YES, NAME, "shard is not allocated on the most utilized disk");
+ return allocation.decision(Decision.YES, NAME,
+ "this shard is not allocated on the most utilized disk and can remain");
}
if (freeBytes < freeBytesThresholdHigh.bytes()) {
if (logger.isDebugEnabled()) {
logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, shard cannot remain",
freeBytesThresholdHigh, freeBytes, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "after allocation less than required [%s] free on node, free: [%s]",
+ return allocation.decision(Decision.NO, NAME,
+                "if this shard remained on this node it would be above the high watermark " +
+ "and there would be less than required [%s] free on node, free: [%s]",
freeBytesThresholdHigh, new ByteSizeValue(freeBytes));
}
if (freeDiskPercentage < freeDiskThresholdHigh) {
@@ -468,11 +494,14 @@ public class DiskThresholdDecider extends AllocationDecider {
logger.debug("less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain",
freeDiskThresholdHigh, freeDiskPercentage, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "after allocation less than required [%s%%] free disk on node, free: [%s%%]",
+ return allocation.decision(Decision.NO, NAME,
+                "if this shard remained on this node it would be above the high watermark " +
+ "and there would be less than required [%s%%] free disk on node, free: [%s%%]",
freeDiskThresholdHigh, freeDiskPercentage);
}
- return allocation.decision(Decision.YES, NAME, "enough disk for shard to remain on node, free: [%s]", new ByteSizeValue(freeBytes));
+ return allocation.decision(Decision.YES, NAME,
+ "there is enough disk on this node for the shard to remain, free: [%s]", new ByteSizeValue(freeBytes));
}
private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
@@ -543,7 +572,8 @@ public class DiskThresholdDecider extends AllocationDecider {
try {
return RatioValue.parseRatioValue(watermark).getAsPercent();
} catch (ElasticsearchParseException ex) {
- // NOTE: this is not end-user leniency, since up above we check that it's a valid byte or percentage, and then store the two cases separately
+ // NOTE: this is not end-user leniency, since up above we check that it's a valid byte or percentage, and then store the two
+ // cases separately
return 100.0;
}
}
@@ -556,7 +586,8 @@ public class DiskThresholdDecider extends AllocationDecider {
try {
return ByteSizeValue.parseBytesSizeValue(watermark, settingName);
} catch (ElasticsearchParseException ex) {
- // NOTE: this is not end-user leniency, since up above we check that it's a valid byte or percentage, and then store the two cases separately
+ // NOTE: this is not end-user leniency, since up above we check that it's a valid byte or percentage, and then store the two
+ // cases separately
return ByteSizeValue.parseBytesSizeValue("0b", settingName);
}
}
@@ -583,7 +614,7 @@ public class DiskThresholdDecider extends AllocationDecider {
private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
// Always allow allocation if the decider is disabled
if (!enabled) {
- return allocation.decision(Decision.YES, NAME, "disk threshold decider disabled");
+ return allocation.decision(Decision.YES, NAME, "the disk threshold decider is disabled");
}
// Allow allocation regardless if only a single data node is available
@@ -591,7 +622,7 @@ public class DiskThresholdDecider extends AllocationDecider {
if (logger.isTraceEnabled()) {
logger.trace("only a single data node is present, allowing allocation");
}
- return allocation.decision(Decision.YES, NAME, "only a single data node is present");
+ return allocation.decision(Decision.YES, NAME, "there is only a single data node present");
}
        // Fail open if there is no info available
@@ -600,7 +631,7 @@ public class DiskThresholdDecider extends AllocationDecider {
if (logger.isTraceEnabled()) {
logger.trace("cluster info unavailable for disk threshold decider, allowing allocation.");
}
- return allocation.decision(Decision.YES, NAME, "cluster info unavailable");
+ return allocation.decision(Decision.YES, NAME, "the cluster info is unavailable");
}
// Fail open if there are no disk usages available
@@ -608,7 +639,7 @@ public class DiskThresholdDecider extends AllocationDecider {
if (logger.isTraceEnabled()) {
logger.trace("unable to determine disk usages for disk-aware allocation, allowing allocation");
}
- return allocation.decision(Decision.YES, NAME, "disk usages unavailable");
+ return allocation.decision(Decision.YES, NAME, "disk usages are unavailable");
}
return null;
}
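
Both watermarks are configured as used-space percentages while the decider compares free space, hence the 100.0 - ... inversions above. A detached sketch of the two-threshold behaviour; the numbers are illustrative, not read from settings:

    // Watermarks are "used" percentages; the decider reasons about "free" percentages.
    class WatermarkMath {
        public static void main(String[] args) {
            double lowWatermarkUsed = 85.0;                           // e.g. "85%" used
            double highWatermarkUsed = 90.0;                          // e.g. "90%" used
            double freeDiskThresholdLow = 100.0 - lowWatermarkUsed;   // 15% free to allocate
            double freeDiskThresholdHigh = 100.0 - highWatermarkUsed; // 10% free to remain
            double freeDiskPercentage = 12.0;
            System.out.println(freeDiskPercentage < freeDiskThresholdLow);  // true: no new shards
            System.out.println(freeDiskPercentage < freeDiskThresholdHigh); // false: shard may remain
        }
    }
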
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 0b69ba2a19..38a2a39fc7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -32,8 +32,9 @@ import org.elasticsearch.common.settings.Settings;
import java.util.Locale;
/**
- * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
- * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #INDEX_ROUTING_REBALANCE_ENABLE_SETTING}.
+ * This allocation decider allows shard allocations / rebalancing via the cluster wide settings
+ * {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting
+ * {@link #INDEX_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link #INDEX_ROUTING_REBALANCE_ENABLE_SETTING}.
 * The per index settings override the cluster wide settings.
*
* <p>
@@ -98,7 +99,7 @@ public class EnableAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (allocation.ignoreDisable()) {
- return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
+ return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of allocation");
}
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
@@ -133,7 +134,7 @@ public class EnableAllocationDecider extends AllocationDecider {
@Override
public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
if (allocation.ignoreDisable()) {
- return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored");
+ return allocation.decision(Decision.YES, NAME, "allocation is explicitly ignoring any disabling of relocation");
}
Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
@@ -167,7 +168,8 @@ public class EnableAllocationDecider extends AllocationDecider {
/**
 * Allocation values, or rather their string representation, to be used with
- * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE_SETTING}
+ * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
+ * {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Allocation {
@@ -193,7 +195,8 @@ public class EnableAllocationDecider extends AllocationDecider {
/**
 * Rebalance values, or rather their string representation, to be used with
- * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE_SETTING}
+ * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} /
+ * {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE_SETTING}
* via cluster / index settings.
*/
public enum Rebalance {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index d1aa0d8b58..eb59c26121 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -50,11 +50,14 @@ import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
* would disallow the allocation. Filters are applied in the following order:
* <ol>
* <li><tt>required</tt> - filters required allocations.
- * If any <tt>required</tt> filters are set the allocation is denied if the index is <b>not</b> in the set of <tt>required</tt> to allocate on the filtered node</li>
+ * If any <tt>required</tt> filters are set, the allocation is denied if the index is <b>not</b> in the set of <tt>required</tt> filters to allocate
+ * on the filtered node</li>
* <li><tt>include</tt> - filters "allowed" allocations.
- * If any <tt>include</tt> filters are set the allocation is denied if the index is <b>not</b> in the set of <tt>include</tt> filters for the filtered node</li>
+ * If any <tt>include</tt> filters are set the allocation is denied if the index is <b>not</b> in the set of <tt>include</tt> filters for
+ * the filtered node</li>
* <li><tt>exclude</tt> - filters "prohibited" allocations.
- * If any <tt>exclude</tt> filters are set the allocation is denied if the index is in the set of <tt>exclude</tt> filters for the filtered node</li>
+ * If any <tt>exclude</tt> filters are set the allocation is denied if the index is in the set of <tt>exclude</tt> filters for the
+ * filtered node</li>
* </ol>
*/
public class FilterAllocationDecider extends AllocationDecider {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
index eb9c5cf8ee..95540d89a6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java
@@ -52,7 +52,7 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
return isVersionCompatible(shardRouting.restoreSource(), node, allocation);
} else {
// fresh primary, we can allocate wherever
- return allocation.decision(Decision.YES, NAME, "primary shard can be allocated anywhere");
+ return allocation.decision(Decision.YES, NAME, "the primary shard is new and can be allocated anywhere");
}
} else {
// relocating primary, only migrate to newer host
@@ -70,16 +70,17 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
}
}
- private Decision isVersionCompatible(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, RoutingAllocation allocation) {
+ private Decision isVersionCompatible(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target,
+ RoutingAllocation allocation) {
final RoutingNode source = routingNodes.node(sourceNodeId);
if (target.node().version().onOrAfter(source.node().version())) {
            /* we can allocate if we can recover from a node that is younger or on the same version;
             * if the primary is already running on a newer version, that won't work due to possible
             * differences in the lucene index format etc. */
- return allocation.decision(Decision.YES, NAME, "target node version [%s] is same or newer than source node version [%s]",
+ return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than source node version [%s]",
target.node().version(), source.node().version());
} else {
- return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than source node version [%s]",
+ return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the source node version [%s]",
target.node().version(), source.node().version());
}
}
@@ -87,10 +88,10 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
private Decision isVersionCompatible(RestoreSource restoreSource, final RoutingNode target, RoutingAllocation allocation) {
if (target.node().version().onOrAfter(restoreSource.version())) {
/* we can allocate if we can restore from a snapshot that is older or on the same version */
- return allocation.decision(Decision.YES, NAME, "target node version [%s] is same or newer than snapshot version [%s]",
+ return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than snapshot version [%s]",
target.node().version(), restoreSource.version());
} else {
- return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than snapshot version [%s]",
+ return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s]",
target.node().version(), restoreSource.version());
}
}
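
The version rule is direction-sensitive: recovery may flow from an older or equal node to a newer one but never back, since older nodes cannot read newer Lucene formats. A hedged sketch with org.elasticsearch.Version; the two version constants are assumed to exist on this branch:

    import org.elasticsearch.Version;

    class VersionRuleExample {
        public static void main(String[] args) {
            Version source = Version.V_2_2_0;
            Version target = Version.V_2_3_0;
            System.out.println(target.onOrAfter(source)); // true: YES, target can read the data
            System.out.println(source.onOrAfter(target)); // false: would be a NO decision
        }
    }
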
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
index 039abd8749..869c631306 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/RebalanceOnlyWhenActiveAllocationDecider.java
@@ -41,8 +41,8 @@ public class RebalanceOnlyWhenActiveAllocationDecider extends AllocationDecider
        // it's ok to check for active here, since in relocation, a shard is split into two in routing
        // nodes, one relocating, and one initializing
if (!allocation.routingNodes().allReplicasActive(shardRouting)) {
- return allocation.decision(Decision.NO, NAME, "not all replicas are active in cluster");
+            return allocation.decision(Decision.NO, NAME, "rebalancing cannot occur if not all replicas are active in the cluster");
}
- return allocation.decision(Decision.YES, NAME, "all replicas are active in cluster");
+ return allocation.decision(Decision.YES, NAME, "all replicas are active in the cluster, rebalancing can occur");
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
index 1c5a3f93b7..59ab67c309 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ReplicaAfterPrimaryActiveAllocationDecider.java
@@ -45,12 +45,12 @@ public class ReplicaAfterPrimaryActiveAllocationDecider extends AllocationDecide
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
if (shardRouting.primary()) {
- return allocation.decision(Decision.YES, NAME, "shard is primary");
+ return allocation.decision(Decision.YES, NAME, "shard is primary and can be allocated");
}
ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting);
if (primary == null) {
- return allocation.decision(Decision.NO, NAME, "primary shard is not yet active");
+ return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active");
}
- return allocation.decision(Decision.YES, NAME, "primary is already active");
+ return allocation.decision(Decision.YES, NAME, "primary shard for this replica is already active");
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
index 44eb7d0e2f..f0b4fdf35c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -61,7 +61,8 @@ public class SameShardAllocationDecider extends AllocationDecider {
Iterable<ShardRouting> assignedShards = allocation.routingNodes().assignedShards(shardRouting);
for (ShardRouting assignedShard : assignedShards) {
if (node.nodeId().equals(assignedShard.currentNodeId())) {
- return allocation.decision(Decision.NO, NAME, "shard cannot be allocated on same node [%s] it already exists on", node.nodeId());
+ return allocation.decision(Decision.NO, NAME,
+ "the shard cannot be allocated on the same node id [%s] on which it already exists", node.nodeId());
}
}
if (sameHost) {
@@ -85,7 +86,7 @@ public class SameShardAllocationDecider extends AllocationDecider {
for (ShardRouting assignedShard : assignedShards) {
if (checkNode.nodeId().equals(assignedShard.currentNodeId())) {
return allocation.decision(Decision.NO, NAME,
- "shard cannot be allocated on same host [%s] it already exists on", node.nodeId());
+ "shard cannot be allocated on the same host [%s] on which it already exists", node.nodeId());
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
index 04247525f1..eb25651635 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
@@ -93,7 +93,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
final int clusterShardLimit = this.clusterShardLimit;
if (indexShardLimit <= 0 && clusterShardLimit <= 0) {
- return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [index: %d, cluster: %d] <= 0",
+ return allocation.decision(Decision.YES, NAME, "total shard limits are disabled: [index: %d, cluster: %d] <= 0",
indexShardLimit, clusterShardLimit);
}
@@ -110,14 +110,16 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
}
}
if (clusterShardLimit > 0 && nodeShardCount >= clusterShardLimit) {
- return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
+ return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
nodeShardCount, clusterShardLimit);
}
if (indexShardLimit > 0 && indexShardCount >= indexShardLimit) {
- return allocation.decision(Decision.NO, NAME, "too many shards for this index [%s] on node [%d], limit: [%d]",
+ return allocation.decision(Decision.NO, NAME,
+ "too many shards for this index [%s] on node [%d], index-level limit per node: [%d]",
shardRouting.index(), indexShardCount, indexShardLimit);
}
- return allocation.decision(Decision.YES, NAME, "shard count under index limit [%d] and node limit [%d] of total shards per node",
+ return allocation.decision(Decision.YES, NAME,
+ "the shard count is under index limit [%d] and cluster level node limit [%d] of total shards per node",
indexShardLimit, clusterShardLimit);
}
@@ -130,7 +132,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
final int clusterShardLimit = this.clusterShardLimit;
if (indexShardLimit <= 0 && clusterShardLimit <= 0) {
- return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [index: %d, cluster: %d] <= 0",
+ return allocation.decision(Decision.YES, NAME, "total shard limits are disabled: [index: %d, cluster: %d] <= 0",
indexShardLimit, clusterShardLimit);
}
@@ -149,14 +151,16 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
        // A subtle difference between `canAllocate` and `canRemain` is that
        // this checks > while canAllocate checks >=
if (clusterShardLimit > 0 && nodeShardCount > clusterShardLimit) {
- return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
+ return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
nodeShardCount, clusterShardLimit);
}
if (indexShardLimit > 0 && indexShardCount > indexShardLimit) {
- return allocation.decision(Decision.NO, NAME, "too many shards for this index [%s] on node [%d], limit: [%d]",
+ return allocation.decision(Decision.NO, NAME,
+ "too many shards for this index [%s] on node [%d], index-level limit per node: [%d]",
shardRouting.index(), indexShardCount, indexShardLimit);
}
- return allocation.decision(Decision.YES, NAME, "shard count under index limit [%d] and node limit [%d] of total shards per node",
+ return allocation.decision(Decision.YES, NAME,
+ "the shard count is under index limit [%d] and cluster level node limit [%d] of total shards per node",
indexShardLimit, clusterShardLimit);
}
@@ -168,7 +172,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
final int clusterShardLimit = this.clusterShardLimit;
if (clusterShardLimit <= 0) {
- return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [cluster: %d] <= 0",
+ return allocation.decision(Decision.YES, NAME, "total shard limits are disabled: [cluster: %d] <= 0",
clusterShardLimit);
}
@@ -181,10 +185,10 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
nodeShardCount++;
}
if (clusterShardLimit >= 0 && nodeShardCount >= clusterShardLimit) {
- return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
+ return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], cluster-level limit per node: [%d]",
nodeShardCount, clusterShardLimit);
}
- return allocation.decision(Decision.YES, NAME, "shard count under node limit [%d] of total shards per node",
+ return allocation.decision(Decision.YES, NAME, "the shard count is under node limit [%d] of total shards per node",
clusterShardLimit);
}
}
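
The >= versus > distinction called out in the comments above means a node sitting exactly at the limit rejects new shards but keeps the ones it already holds. A tiny boundary illustration, detached from the decider:

    class ShardLimitBoundary {
        public static void main(String[] args) {
            int clusterShardLimit = 2;
            int nodeShardCount = 2;  // node is exactly at the limit
            System.out.println(nodeShardCount >= clusterShardLimit); // true: canAllocate says NO
            System.out.println(nodeShardCount > clusterShardLimit);  // false: canRemain says YES
        }
    }
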
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
index d656afc803..54cfb6407d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -54,7 +54,8 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
}
/**
- * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from given settings
+ * Creates a new {@link org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider} instance from
+ * given settings
*
* @param settings {@link org.elasticsearch.common.settings.Settings} to use
*/
@@ -66,7 +67,8 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings);
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
+ this::setEnableRelocation);
}
private void setEnableRelocation(boolean enableRelocation) {
@@ -104,14 +106,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
for (SnapshotsInProgress.Entry snapshot : snapshotsInProgress.entries()) {
SnapshotsInProgress.ShardSnapshotStatus shardSnapshotStatus = snapshot.shards().get(shardRouting.shardId());
- if (shardSnapshotStatus != null && !shardSnapshotStatus.state().completed() && shardSnapshotStatus.nodeId() != null && shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId())) {
- logger.trace("Preventing snapshotted shard [{}] to be moved from node [{}]", shardRouting.shardId(), shardSnapshotStatus.nodeId());
+ if (shardSnapshotStatus != null && !shardSnapshotStatus.state().completed() && shardSnapshotStatus.nodeId() != null &&
+ shardSnapshotStatus.nodeId().equals(shardRouting.currentNodeId())) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Preventing snapshotted shard [{}] to be moved from node [{}]",
+ shardRouting.shardId(), shardSnapshotStatus.nodeId());
+ }
return allocation.decision(Decision.NO, NAME, "snapshot for shard [%s] is currently running on node [%s]",
shardRouting.shardId(), shardSnapshotStatus.nodeId());
}
}
}
- return allocation.decision(Decision.YES, NAME, "shard not primary or relocation disabled");
+ return allocation.decision(Decision.YES, NAME, "the shard is not primary or relocation is disabled");
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index ca6b312da4..6eb44351c7 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -84,11 +84,16 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
concurrentIncomingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.get(settings);
concurrentOutgoingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.get(settings);
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries);
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, this::setConcurrentIncomingRecoverries);
- clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, this::setConcurrentOutgoingRecoverries);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
+ this::setPrimariesInitialRecoveries);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
+ this::setConcurrentIncomingRecoverries);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
+ this::setConcurrentOutgoingRecoverries);
- logger.debug("using node_concurrent_outgoing_recoveries [{}], node_concurrent_incoming_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentOutgoingRecoveries, concurrentIncomingRecoveries, primariesInitialRecoveries);
+ logger.debug("using node_concurrent_outgoing_recoveries [{}], node_concurrent_incoming_recoveries [{}], " +
+ "node_initial_primaries_recoveries [{}]",
+ concurrentOutgoingRecoveries, concurrentIncomingRecoveries, primariesInitialRecoveries);
}
private void setConcurrentIncomingRecoverries(int concurrentIncomingRecoveries) {
@@ -118,7 +123,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
}
}
if (primariesInRecovery >= primariesInitialRecoveries) {
- return allocation.decision(Decision.THROTTLE, NAME, "too many primaries currently recovering [%d], limit: [%d]",
+ return allocation.decision(Decision.THROTTLE, NAME, "too many primaries are currently recovering [%d], limit: [%d]",
primariesInRecovery, primariesInitialRecoveries);
} else {
return allocation.decision(Decision.YES, NAME, "below primary recovery limit of [%d]", primariesInitialRecoveries);
@@ -137,13 +142,17 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
int currentOutRecoveries = allocation.routingNodes().getOutgoingRecoveries(node.nodeId());
int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
if (currentOutRecoveries >= concurrentOutgoingRecoveries) {
- return allocation.decision(Decision.THROTTLE, NAME, "too many outgoing shards currently recovering [%d], limit: [%d]",
+ return allocation.decision(Decision.THROTTLE, NAME, "too many outgoing shards are currently recovering [%d], limit: [%d]",
currentOutRecoveries, concurrentOutgoingRecoveries);
} else if (currentInRecoveries >= concurrentIncomingRecoveries) {
- return allocation.decision(Decision.THROTTLE, NAME, "too many incoming shards currently recovering [%d], limit: [%d]",
+ return allocation.decision(Decision.THROTTLE, NAME, "too many incoming shards are currently recovering [%d], limit: [%d]",
currentInRecoveries, concurrentIncomingRecoveries);
} else {
- return allocation.decision(Decision.YES, NAME, "below shard recovery limit of outgoing: [%d] incoming: [%d]", concurrentOutgoingRecoveries, concurrentIncomingRecoveries);
+ return allocation.decision(Decision.YES, NAME, "below shard recovery limit of outgoing: [%d < %d] incoming: [%d < %d]",
+ currentOutRecoveries,
+ concurrentOutgoingRecoveries,
+ currentInRecoveries,
+ concurrentIncomingRecoveries);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index 54e8535b57..8a38453eb4 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -685,9 +685,8 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
- logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
- newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
- newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
+ logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", t, executionTime,
+ newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.prettyPrint());
// TODO: do we want to call updateTask.onFailure here?
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
index 97ef6561c9..658d8ed84c 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
@@ -36,8 +36,6 @@ public class CircleBuilder extends ShapeBuilder {
public static final String FIELD_RADIUS = "radius";
public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;
- public static final CircleBuilder PROTOTYPE = new CircleBuilder();
-
private DistanceUnit unit = DistanceUnit.DEFAULT;
private double radius;
private Coordinate center;
@@ -51,6 +49,21 @@ public class CircleBuilder extends ShapeBuilder {
}
/**
+ * Read from a stream.
+ */
+ public CircleBuilder(StreamInput in) throws IOException {
+ center(readFromStream(in));
+ radius(in.readDouble(), DistanceUnit.readFromStream(in));
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ writeCoordinateTo(center, out);
+ out.writeDouble(radius);
+ unit.writeTo(out);
+ }
+
+ /**
* Set the center of the circle
*
* @param center coordinate of the circles center
@@ -170,18 +183,4 @@ public class CircleBuilder extends ShapeBuilder {
Objects.equals(radius, other.radius) &&
Objects.equals(unit.ordinal(), other.unit.ordinal());
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- writeCoordinateTo(center, out);
- out.writeDouble(radius);
- DistanceUnit.writeDistanceUnit(out, unit);
- }
-
- @Override
- public CircleBuilder readFrom(StreamInput in) throws IOException {
- return new CircleBuilder()
- .center(readCoordinateFrom(in))
- .radius(in.readDouble(), DistanceUnit.readDistanceUnit(in));
- }
}
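
The hunk above replaces the PROTOTYPE/readFrom pair with a constructor that takes a StreamInput. A hedged round-trip sketch of the new pattern; BytesStreamOutput and StreamInput.wrap(byte[]) are assumed from the same stream package (wrap is visible later in this diff), and toBytes() on the resulting bytes is an assumption:

    // Sketch: serialize with writeTo, rebuild via the new stream constructor.
    CircleBuilder original = new CircleBuilder()
            .center(new Coordinate(0.0, 0.0))
            .radius(10, DistanceUnit.METERS);
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                        // center, radius, unit
    StreamInput in = StreamInput.wrap(out.bytes().toBytes());
    CircleBuilder copy = new CircleBuilder(in);   // was: PROTOTYPE.readFrom(in)
    assert copy.equals(original);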
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java b/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java
index 72ac7be811..b6b9df45d0 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/CoordinateCollection.java
@@ -21,9 +21,12 @@ package org.elasticsearch.common.geo.builders;
import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
@@ -48,6 +51,25 @@ public abstract class CoordinateCollection<E extends CoordinateCollection<E>> ex
this.coordinates = coordinates;
}
+ /**
+ * Read from a stream.
+ */
+ protected CoordinateCollection(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ coordinates = new ArrayList<>(size);
+ for (int i=0; i < size; i++) {
+ coordinates.add(readFromStream(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(coordinates.size());
+ for (Coordinate point : coordinates) {
+ writeCoordinateTo(point, out);
+ }
+ }
+
@SuppressWarnings("unchecked")
private E thisRef() {
return (E)this;
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
index ab997387ea..5b80ceeeee 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
@@ -33,11 +33,12 @@ public class EnvelopeBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
- public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(new Coordinate(-1.0, 1.0), new Coordinate(1.0, -1.0));
-
- private Coordinate topLeft;
- private Coordinate bottomRight;
+ private final Coordinate topLeft;
+ private final Coordinate bottomRight;
+ /**
+ * Build an envelope from the top left and bottom right coordinates.
+ */
public EnvelopeBuilder(Coordinate topLeft, Coordinate bottomRight) {
Objects.requireNonNull(topLeft, "topLeft of envelope cannot be null");
Objects.requireNonNull(bottomRight, "bottomRight of envelope cannot be null");
@@ -45,6 +46,20 @@ public class EnvelopeBuilder extends ShapeBuilder {
this.bottomRight = bottomRight;
}
+ /**
+ * Read from a stream.
+ */
+ public EnvelopeBuilder(StreamInput in) throws IOException {
+ topLeft = readFromStream(in);
+ bottomRight = readFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ writeCoordinateTo(topLeft, out);
+ writeCoordinateTo(bottomRight, out);
+ }
+
public Coordinate topLeft() {
return this.topLeft;
}
@@ -91,15 +106,4 @@ public class EnvelopeBuilder extends ShapeBuilder {
return Objects.equals(topLeft, other.topLeft) &&
Objects.equals(bottomRight, other.bottomRight);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- writeCoordinateTo(topLeft, out);
- writeCoordinateTo(bottomRight, out);
- }
-
- @Override
- public EnvelopeBuilder readFrom(StreamInput in) throws IOException {
- return new EnvelopeBuilder(readCoordinateFrom(in), readCoordinateFrom(in));
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
index d21f47cf05..b8559fcb48 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
@@ -36,9 +36,34 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;
- public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
+ /**
+ * List of shapes. Package scope for testing.
+ */
+ final List<ShapeBuilder> shapes = new ArrayList<>();
- protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();
+ /**
+ * Build an empty GeometryCollectionBuilder.
+ */
+ public GeometryCollectionBuilder() {
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public GeometryCollectionBuilder(StreamInput in) throws IOException {
+ int shapes = in.readVInt();
+ for (int i = 0; i < shapes; i++) {
+ shape(in.readShape());
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(shapes.size());
+ for (ShapeBuilder shape : shapes) {
+ out.writeShape(shape);
+ }
+ }
public GeometryCollectionBuilder shape(ShapeBuilder shape) {
this.shapes.add(shape);
@@ -146,23 +171,4 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj;
return Objects.equals(shapes, other.shapes);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(shapes.size());
- for (ShapeBuilder shape : shapes) {
- out.writeShape(shape);
- }
- }
-
- @Override
- public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException {
- GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder();
- int shapes = in.readVInt();
- for (int i = 0; i < shapes; i++) {
- geometryCollectionBuilder.shape(in.readShape());
- }
- return geometryCollectionBuilder;
- }
-
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
index cbc9002c78..e79578d9ab 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -19,15 +19,14 @@
package org.elasticsearch.common.geo.builders;
-import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
import com.vividsolutions.jts.geom.LineString;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.locationtech.spatial4j.shape.Shape;
import java.io.IOException;
import java.util.ArrayList;
@@ -36,6 +35,7 @@ import java.util.List;
import java.util.Objects;
public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {
+ public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
/**
* Construct a new LineString.
@@ -55,9 +55,12 @@ public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {
this(coordinates.build());
}
- public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
-
- public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(1.0, 1.0));
+ /**
+ * Read from a stream.
+ */
+ public LineStringBuilder(StreamInput in) throws IOException {
+ super(in);
+ }
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
@@ -182,23 +185,4 @@ public class LineStringBuilder extends CoordinateCollection<LineStringBuilder> {
LineStringBuilder other = (LineStringBuilder) obj;
return Objects.equals(coordinates, other.coordinates);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(coordinates.size());
- for (Coordinate point : coordinates) {
- writeCoordinateTo(point, out);
- }
- }
-
- @Override
- public LineStringBuilder readFrom(StreamInput in) throws IOException {
- CoordinatesBuilder coordinates = new CoordinatesBuilder();
- int size = in.readVInt();
- for (int i=0; i < size; i++) {
- coordinates.coordinate(readCoordinateFrom(in));
- }
- LineStringBuilder lineStringBuilder = new LineStringBuilder(coordinates);
- return lineStringBuilder;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
index 51f4fd232c..04e25862c8 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -37,10 +37,29 @@ public class MultiLineStringBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
- public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
-
private final ArrayList<LineStringBuilder> lines = new ArrayList<>();
+ public MultiLineStringBuilder() {
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public MultiLineStringBuilder(StreamInput in) throws IOException {
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ linestring(new LineStringBuilder(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(lines.size());
+ for (LineStringBuilder line : lines) {
+ line.writeTo(out);
+ }
+ }
+
public MultiLineStringBuilder linestring(LineStringBuilder line) {
this.lines.add(line);
return this;
@@ -114,22 +133,4 @@ public class MultiLineStringBuilder extends ShapeBuilder {
MultiLineStringBuilder other = (MultiLineStringBuilder) obj;
return Objects.equals(lines, other.lines);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(lines.size());
- for (LineStringBuilder line : lines) {
- line.writeTo(out);
- }
- }
-
- @Override
- public MultiLineStringBuilder readFrom(StreamInput in) throws IOException {
- MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder();
- int size = in.readVInt();
- for (int i = 0; i < size; i++) {
- multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in));
- }
- return multiLineStringBuilder;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
index b8f2c8137e..f8a0624436 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
@@ -19,14 +19,13 @@
package org.elasticsearch.common.geo.builders;
-import org.locationtech.spatial4j.shape.Point;
-import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
import java.io.IOException;
import java.util.ArrayList;
@@ -37,8 +36,6 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
- public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
-
/**
* Create a new {@link MultiPointBuilder}.
* @param coordinates needs at least two coordinates to be valid, otherwise will throw an exception
@@ -47,6 +44,13 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {
super(coordinates);
}
+ /**
+ * Read from a stream.
+ */
+ public MultiPointBuilder(StreamInput in) throws IOException {
+ super(in);
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -91,24 +95,4 @@ public class MultiPointBuilder extends CoordinateCollection<MultiPointBuilder> {
MultiPointBuilder other = (MultiPointBuilder) obj;
return Objects.equals(coordinates, other.coordinates);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(coordinates.size());
- for (Coordinate point : coordinates) {
- writeCoordinateTo(point, out);
- }
- }
-
- @Override
- public MultiPointBuilder readFrom(StreamInput in) throws IOException {
- int size = in.readVInt();
- List<Coordinate> points = new ArrayList<Coordinate>(size);
- for (int i=0; i < size; i++) {
- points.add(readCoordinateFrom(in));
- }
- MultiPointBuilder multiPointBuilder = new MultiPointBuilder(points);
-
- return multiPointBuilder;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
index 6ee679b730..f5e5bca505 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -36,20 +36,45 @@ import java.util.Objects;
public class MultiPolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
- public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
- private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();
+ private final List<PolygonBuilder> polygons = new ArrayList<>();
- private Orientation orientation = Orientation.RIGHT;
+ private final Orientation orientation;
+ /**
+ * Build a MultiPolygonBuilder with RIGHT orientation.
+ */
public MultiPolygonBuilder() {
this(Orientation.RIGHT);
}
+ /**
+ * Build a MultiPolygonBuilder with an arbitrary orientation.
+ */
public MultiPolygonBuilder(Orientation orientation) {
this.orientation = orientation;
}
+ /**
+ * Read from a stream.
+ */
+ public MultiPolygonBuilder(StreamInput in) throws IOException {
+ orientation = Orientation.readFrom(in);
+ int holes = in.readVInt();
+ for (int i = 0; i < holes; i++) {
+ polygon(new PolygonBuilder(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ orientation.writeTo(out);
+ out.writeVInt(polygons.size());
+ for (PolygonBuilder polygon : polygons) {
+ polygon.writeTo(out);
+ }
+ }
+
public Orientation orientation() {
return this.orientation;
}
@@ -70,7 +95,7 @@ public class MultiPolygonBuilder extends ShapeBuilder {
/**
* get the list of polygons
*/
- public ArrayList<PolygonBuilder> polygons() {
+ public List<PolygonBuilder> polygons() {
return polygons;
}
@@ -134,23 +159,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return Objects.equals(polygons, other.polygons) &&
Objects.equals(orientation, other.orientation);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- orientation.writeTo(out);
- out.writeVInt(polygons.size());
- for (PolygonBuilder polygon : polygons) {
- polygon.writeTo(out);
- }
- }
-
- @Override
- public MultiPolygonBuilder readFrom(StreamInput in) throws IOException {
- MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in));
- int holes = in.readVInt();
- for (int i = 0; i < holes; i++) {
- polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in));
- }
- return polyBuilder;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
index 30b7e370f2..fdd9826410 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
@@ -30,9 +30,7 @@ import java.io.IOException;
import java.util.Objects;
public class PointBuilder extends ShapeBuilder {
-
public static final GeoShapeType TYPE = GeoShapeType.POINT;
- public static final PointBuilder PROTOTYPE = new PointBuilder();
private Coordinate coordinate;
@@ -43,6 +41,18 @@ public class PointBuilder extends ShapeBuilder {
this.coordinate = ZERO_ZERO;
}
+ /**
+ * Read from a stream.
+ */
+ public PointBuilder(StreamInput in) throws IOException {
+ coordinate = readFromStream(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ writeCoordinateTo(coordinate, out);
+ }
+
public PointBuilder coordinate(Coordinate coordinate) {
this.coordinate = coordinate;
return this;
@@ -91,14 +101,4 @@ public class PointBuilder extends ShapeBuilder {
PointBuilder other = (PointBuilder) obj;
return Objects.equals(coordinate, other.coordinate);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- writeCoordinateTo(coordinate, out);
- }
-
- @Override
- public PointBuilder readFrom(StreamInput in) throws IOException {
- return new PointBuilder().coordinate(readCoordinateFrom(in));
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index 52314c98ef..9fad4fb8ef 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -53,8 +53,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
- public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).coordinate(0.0, 1.0)
- .coordinate(1.0, 0.0).coordinate(0.0, 0.0));
private static final Coordinate[][] EMPTY = new Coordinate[0][];
@@ -64,7 +62,7 @@ public class PolygonBuilder extends ShapeBuilder {
private LineStringBuilder shell;
// List of line strings defining the holes of the polygon
- private final ArrayList<LineStringBuilder> holes = new ArrayList<>();
+ private final List<LineStringBuilder> holes = new ArrayList<>();
public PolygonBuilder(LineStringBuilder lineString, Orientation orientation, boolean coerce) {
this.orientation = orientation;
@@ -87,6 +85,28 @@ public class PolygonBuilder extends ShapeBuilder {
this(coordinates, Orientation.RIGHT);
}
+ /**
+ * Read from a stream.
+ */
+ public PolygonBuilder(StreamInput in) throws IOException {
+ shell = new LineStringBuilder(in);
+ orientation = Orientation.readFrom(in);
+ int holes = in.readVInt();
+ for (int i = 0; i < holes; i++) {
+ hole(new LineStringBuilder(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ shell.writeTo(out);
+ orientation.writeTo(out);
+ out.writeVInt(holes.size());
+ for (LineStringBuilder hole : holes) {
+ hole.writeTo(out);
+ }
+ }
+
public Orientation orientation() {
return this.orientation;
}
@@ -383,10 +403,10 @@ public class PolygonBuilder extends ShapeBuilder {
return coordinates;
}
- private static Coordinate[][][] buildCoordinates(ArrayList<ArrayList<Coordinate[]>> components) {
+ private static Coordinate[][][] buildCoordinates(List<List<Coordinate[]>> components) {
Coordinate[][][] result = new Coordinate[components.size()][][];
for (int i = 0; i < result.length; i++) {
- ArrayList<Coordinate[]> component = components.get(i);
+ List<Coordinate[]> component = components.get(i);
result[i] = component.toArray(new Coordinate[component.size()][]);
}
@@ -416,13 +436,13 @@ public class PolygonBuilder extends ShapeBuilder {
return points;
}
- private static Edge[] edges(Edge[] edges, int numHoles, ArrayList<ArrayList<Coordinate[]>> components) {
+ private static Edge[] edges(Edge[] edges, int numHoles, List<List<Coordinate[]>> components) {
ArrayList<Edge> mainEdges = new ArrayList<>(edges.length);
for (int i = 0; i < edges.length; i++) {
if (edges[i].component >= 0) {
int length = component(edges[i], -(components.size()+numHoles+1), mainEdges);
- ArrayList<Coordinate[]> component = new ArrayList<>();
+ List<Coordinate[]> component = new ArrayList<>();
component.add(coordinates(edges[i], new Coordinate[length+1]));
components.add(component);
}
@@ -432,12 +452,12 @@ public class PolygonBuilder extends ShapeBuilder {
}
private static Coordinate[][][] compose(Edge[] edges, Edge[] holes, int numHoles) {
- final ArrayList<ArrayList<Coordinate[]>> components = new ArrayList<>();
+ final List<List<Coordinate[]>> components = new ArrayList<>();
assign(holes, holes(holes, numHoles), numHoles, edges(edges, numHoles, components), components);
return buildCoordinates(components);
}
- private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, ArrayList<ArrayList<Coordinate[]>> components) {
+ private static void assign(Edge[] holes, Coordinate[][] points, int numHoles, Edge[] edges, List<List<Coordinate[]>> components) {
// Assign Hole to related components
// To find the new component the hole belongs to all intersections of the
// polygon edges with a vertical line are calculated. This vertical line
@@ -668,8 +688,8 @@ public class PolygonBuilder extends ShapeBuilder {
* number of points to use
* @return the edges creates
*/
- private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
- int length) {
+ private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges,
+ final int edgeOffset, int length) {
assert edges.length >= length+edgeOffset;
assert points.length >= length+pointOffset;
edges[edgeOffset] = new Edge(points[pointOffset], null);
@@ -725,26 +745,4 @@ public class PolygonBuilder extends ShapeBuilder {
Objects.equals(holes, other.holes) &&
Objects.equals(orientation, other.orientation);
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- shell.writeTo(out);
- orientation.writeTo(out);
- out.writeVInt(holes.size());
- for (LineStringBuilder hole : holes) {
- hole.writeTo(out);
- }
- }
-
- @Override
- public PolygonBuilder readFrom(StreamInput in) throws IOException {
- LineStringBuilder shell = LineStringBuilder.PROTOTYPE.readFrom(in);
- Orientation orientation = Orientation.readFrom(in);
- PolygonBuilder polyBuilder = new PolygonBuilder(shell, orientation);
- int holes = in.readVInt();
- for (int i = 0; i < holes; i++) {
- polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in));
- }
- return polyBuilder;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index d0c7396457..a0d77d004d 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -180,7 +180,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
out.writeDouble(coordinate.y);
}
- protected Coordinate readCoordinateFrom(StreamInput in) throws IOException {
+ protected static Coordinate readFromStream(StreamInput in) throws IOException {
return new Coordinate(in.readDouble(), in.readDouble());
}
@@ -519,7 +519,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
} else if (geometryCollections == null && GeoShapeType.GEOMETRYCOLLECTION == shapeType) {
throw new ElasticsearchParseException("geometries not included");
} else if (radius != null && GeoShapeType.CIRCLE != shapeType) {
- throw new ElasticsearchParseException("field [{}] is supported for [{}] only", CircleBuilder.FIELD_RADIUS, CircleBuilder.TYPE);
+ throw new ElasticsearchParseException("field [{}] is supported for [{}] only", CircleBuilder.FIELD_RADIUS,
+ CircleBuilder.TYPE);
}
switch (shapeType) {
@@ -539,7 +540,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
protected static void validatePointNode(CoordinateNode node) {
if (node.isEmpty()) {
- throw new ElasticsearchParseException("invalid number of points (0) provided when expecting a single coordinate ([lat, lng])");
+ throw new ElasticsearchParseException(
+ "invalid number of points (0) provided when expecting a single coordinate ([lat, lng])");
} else if (node.coordinate == null) {
if (node.children.isEmpty() == false) {
throw new ElasticsearchParseException("multipoint data provided when single point data expected.");
@@ -559,8 +561,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
// validate the coordinate array for envelope type
if (coordinates.children.size() != 2) {
- throw new ElasticsearchParseException("invalid number of points [{}] provided for " +
- "geo_shape [{}] when expecting an array of 2 coordinates", coordinates.children.size(), GeoShapeType.ENVELOPE.shapename);
+ throw new ElasticsearchParseException(
+ "invalid number of points [{}] provided for geo_shape [{}] when expecting an array of 2 coordinates",
+ coordinates.children.size(), GeoShapeType.ENVELOPE.shapename);
}
// verify coordinate bounds, correct if necessary
Coordinate uL = coordinates.children.get(0).coordinate;
@@ -604,7 +607,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
* LineStringBuilder should throw a graceful exception if < 2 coordinates/points are provided
*/
if (coordinates.children.size() < 2) {
- throw new ElasticsearchParseException("invalid number of points in LineString (found [{}] - must be >= 2)", coordinates.children.size());
+ throw new ElasticsearchParseException("invalid number of points in LineString (found [{}] - must be >= 2)",
+ coordinates.children.size());
}
CoordinatesBuilder line = new CoordinatesBuilder();
@@ -636,10 +640,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
throw new ElasticsearchParseException(error);
}
- int numValidPts;
- if (coordinates.children.size() < (numValidPts = (coerce) ? 3 : 4)) {
- throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= " + numValidPts + ")(",
- coordinates.children.size());
+ int numValidPts = coerce ? 3 : 4;
+ if (coordinates.children.size() < numValidPts) {
+ throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= [{}])",
+ coordinates.children.size(), numValidPts);
}
if (!coordinates.children.get(0).coordinate.equals(
@@ -655,7 +659,8 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
protected static PolygonBuilder parsePolygon(CoordinateNode coordinates, final Orientation orientation, final boolean coerce) {
if (coordinates.children == null || coordinates.children.isEmpty()) {
- throw new ElasticsearchParseException("invalid LinearRing provided for type polygon. Linear ring must be an array of coordinates");
+ throw new ElasticsearchParseException(
+ "invalid LinearRing provided for type polygon. Linear ring must be an array of coordinates");
}
LineStringBuilder shell = parseLinearRing(coordinates.children.get(0), coerce);
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
index 1c82881443..5194510bcf 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
@@ -21,6 +21,8 @@ package org.elasticsearch.common.geo.builders;
import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+
import java.util.List;
/**
@@ -137,4 +139,16 @@ public class ShapeBuilders {
public static EnvelopeBuilder newEnvelope(Coordinate topLeft, Coordinate bottomRight) {
return new EnvelopeBuilder(topLeft, bottomRight);
}
+
+ public static void register(NamedWriteableRegistry namedWriteableRegistry) {
+ namedWriteableRegistry.register(ShapeBuilder.class, PointBuilder.TYPE.shapeName(), PointBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, CircleBuilder.TYPE.shapeName(), CircleBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, EnvelopeBuilder.TYPE.shapeName(), EnvelopeBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, MultiPointBuilder.TYPE.shapeName(), MultiPointBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, LineStringBuilder.TYPE.shapeName(), LineStringBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, MultiLineStringBuilder.TYPE.shapeName(), MultiLineStringBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, PolygonBuilder.TYPE.shapeName(), PolygonBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, MultiPolygonBuilder.TYPE.shapeName(), MultiPolygonBuilder::new);
+ namedWriteableRegistry.register(ShapeBuilder.class, GeometryCollectionBuilder.TYPE.shapeName(), GeometryCollectionBuilder::new);
+ }
}
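
A sketch of how these registrations are consumed on the read side, assuming the registry is wrapped by NamedWriteableAwareStreamInput as shown later in this diff; `shapeBytes` is a hypothetical byte[] produced by StreamOutput.writeShape:

    // Sketch: register shape readers once, then decode a shape by name.
    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    ShapeBuilders.register(registry);
    StreamInput in = new NamedWriteableAwareStreamInput(
            StreamInput.wrap(shapeBytes), registry);   // shapeBytes: hypothetical
    ShapeBuilder shape = in.readShape();               // dispatches via getReader(ShapeBuilder.class, name)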
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
index a6d1708965..c683573df7 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java
@@ -36,7 +36,12 @@ public class NamedWriteableAwareStreamInput extends FilterStreamInput {
@Override
<C> C readNamedWriteable(Class<C> categoryClass) throws IOException {
String name = readString();
- NamedWriteable<? extends C> namedWriteable = namedWriteableRegistry.getPrototype(categoryClass, name);
- return namedWriteable.readFrom(this);
+ Writeable.Reader<? extends C> reader = namedWriteableRegistry.getReader(categoryClass, name);
+ C c = reader.read(this);
+ if (c == null) {
+ throw new IOException(
+ "Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream.");
+ }
+ return c;
}
}
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
index 4201478674..5a3de923bd 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableRegistry.java
@@ -31,54 +31,70 @@ public class NamedWriteableRegistry {
private final Map<Class<?>, InnerRegistry<?>> registry = new HashMap<>();
/**
- * Registers a {@link NamedWriteable} prototype given its category
+ * Register a {@link NamedWriteable} given its category, its name, and a function to read it from the stream.
+ *
+ * This method suppresses the rawtypes warning because it intentionally uses NamedWriteable instead of {@code NamedWriteable<T>} so it
+ * is easier to use and because we might be able to drop the type parameter from NamedWriteable entirely some day.
*/
- public synchronized <T> void registerPrototype(Class<T> categoryClass, NamedWriteable<? extends T> namedWriteable) {
+ @SuppressWarnings("rawtypes")
+ public synchronized <T extends NamedWriteable> void register(Class<T> categoryClass, String name,
+ Writeable.Reader<? extends T> reader) {
@SuppressWarnings("unchecked")
- InnerRegistry<T> innerRegistry = (InnerRegistry<T>)registry.get(categoryClass);
+ InnerRegistry<T> innerRegistry = (InnerRegistry<T>) registry.get(categoryClass);
if (innerRegistry == null) {
innerRegistry = new InnerRegistry<>(categoryClass);
registry.put(categoryClass, innerRegistry);
}
- innerRegistry.registerPrototype(namedWriteable);
+ innerRegistry.register(name, reader);
+ }
+
+ /**
+ * Registers a {@link NamedWriteable} prototype given its category.
+ * @deprecated Prefer {@link #register(Class, String, org.elasticsearch.common.io.stream.Writeable.Reader)}
+ */
+ @Deprecated
+ @SuppressWarnings("rawtypes") // TODO remove this method entirely before 5.0.0 GA
+ public synchronized <T extends NamedWriteable> void registerPrototype(Class<T> categoryClass,
+ NamedWriteable<? extends T> namedWriteable) {
+ register(categoryClass, namedWriteable.getWriteableName(), namedWriteable::readFrom);
}
/**
* Returns a prototype of the {@link NamedWriteable} object identified by the name provided as argument and its category
*/
- public synchronized <T> NamedWriteable<? extends T> getPrototype(Class<T> categoryClass, String name) {
+ public synchronized <T> Writeable.Reader<? extends T> getReader(Class<T> categoryClass, String name) {
@SuppressWarnings("unchecked")
InnerRegistry<T> innerRegistry = (InnerRegistry<T>)registry.get(categoryClass);
if (innerRegistry == null) {
throw new IllegalArgumentException("unknown named writeable category [" + categoryClass.getName() + "]");
}
- return innerRegistry.getPrototype(name);
+ return innerRegistry.getReader(name);
}
private static class InnerRegistry<T> {
- private final Map<String, NamedWriteable<? extends T>> registry = new HashMap<>();
+ private final Map<String, Writeable.Reader<? extends T>> registry = new HashMap<>();
private final Class<T> categoryClass;
private InnerRegistry(Class<T> categoryClass) {
this.categoryClass = categoryClass;
}
- private void registerPrototype(NamedWriteable<? extends T> namedWriteable) {
- NamedWriteable<? extends T> existingNamedWriteable = registry.get(namedWriteable.getWriteableName());
- if (existingNamedWriteable != null) {
- throw new IllegalArgumentException("named writeable of type [" + namedWriteable.getClass().getName() + "] with name [" + namedWriteable.getWriteableName() + "] " +
- "is already registered by type [" + existingNamedWriteable.getClass().getName() + "] within category [" + categoryClass.getName() + "]");
+ private void register(String name, Writeable.Reader<? extends T> reader) {
+ Writeable.Reader<? extends T> existingReader = registry.get(name);
+ if (existingReader != null) {
+ throw new IllegalArgumentException(
+ "named writeable [" + categoryClass.getName() + "][" + name + "] is already registered by [" + reader + "]");
}
- registry.put(namedWriteable.getWriteableName(), namedWriteable);
+ registry.put(name, reader);
}
- private NamedWriteable<? extends T> getPrototype(String name) {
- NamedWriteable<? extends T> namedWriteable = registry.get(name);
- if (namedWriteable == null) {
- throw new IllegalArgumentException("unknown named writeable with name [" + name + "] within category [" + categoryClass.getName() + "]");
+ private Writeable.Reader<? extends T> getReader(String name) {
+ Writeable.Reader<? extends T> reader = registry.get(name);
+ if (reader == null) {
+ throw new IllegalArgumentException("unknown named writeable [" + categoryClass.getName() + "][" + name + "]");
}
- return namedWriteable;
+ return reader;
}
}
}
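
The before/after of the registration API in this hunk, sketched with the ReplicationTask.Status registration that appears later in this diff ('in' is an assumed StreamInput):

    // Old, now deprecated: a PROTOTYPE instance supplies both name and readFrom.
    // registry.registerPrototype(Task.Status.class, ReplicationTask.Status.PROTOTYPE);

    // New: category class, wire name, and a constructor reference as the Reader.
    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    registry.register(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new);
    Writeable.Reader<? extends Task.Status> reader =
            registry.getReader(Task.Status.class, ReplicationTask.Status.NAME);
    Task.Status status = reader.read(in);   // 'in' is an assumed StreamInput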
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index c5709db536..a5750fcc54 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -566,9 +566,14 @@ public abstract class StreamInput extends InputStream {
}
}
- public <T extends Writeable> T readOptionalWritable(Writeable.IOFunction<StreamInput, T> provider) throws IOException {
+ public <T extends Writeable> T readOptionalWriteable(Writeable.Reader<T> reader) throws IOException {
if (readBoolean()) {
- return provider.apply(this);
+ T t = reader.read(this);
+ if (t == null) {
+ throw new IOException("Writeable.Reader [" + reader
+ + "] returned null which is not allowed and probably means it screwed up the stream.");
+ }
+ return t;
} else {
return null;
}
@@ -707,6 +712,16 @@ public abstract class StreamInput extends InputStream {
}
/**
+ * Reads an optional {@link QueryBuilder}.
+ */
+ public QueryBuilder<?> readOptionalQuery() throws IOException {
+ if (readBoolean()) {
+ return readNamedWriteable(QueryBuilder.class);
+ }
+ return null;
+ }
+
+ /**
* Reads a {@link ShapeBuilder} from the current stream
*/
public ShapeBuilder readShape() throws IOException {
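
The renamed readOptionalWriteable pins down a wire contract: a presence boolean, then the payload, and a Reader may never return null. A hedged sketch of both sides (writeOptionalWriteable is not part of this diff, so the writer is spelled out manually):

    void writeOptionalStatus(StreamOutput out, ReplicationTask.Status status) throws IOException {
        out.writeBoolean(status != null);   // presence flag first
        if (status != null) {
            status.writeTo(out);            // then the payload
        }
    }

    ReplicationTask.Status readOptionalStatus(StreamInput in) throws IOException {
        // Returns null only when the flag was false; a Reader that returns
        // null is rejected with an IOException as a corrupted stream.
        return in.readOptionalWriteable(ReplicationTask.Status::new);
    }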
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index 630c297edf..6b7607a3e7 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -690,6 +690,18 @@ public abstract class StreamOutput extends OutputStream {
}
/**
+ * Write an optional {@link QueryBuilder} to the stream.
+ */
+ public void writeOptionalQuery(@Nullable QueryBuilder<?> queryBuilder) throws IOException {
+ if (queryBuilder == null) {
+ writeBoolean(false);
+ } else {
+ writeBoolean(true);
+ writeQuery(queryBuilder);
+ }
+ }
+
+ /**
* Writes a {@link ShapeBuilder} to the current stream
*/
public void writeShape(ShapeBuilder shapeBuilder) throws IOException {
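
writeOptionalQuery mirrors readOptionalQuery from the StreamInput hunk above. A sketch assuming `out` and `in` are wired to the same buffer, with `someQueryBuilder` as a hypothetical query:

    // Null query: a single false boolean goes over the wire.
    out.writeOptionalQuery(null);
    assert in.readOptionalQuery() == null;

    // Non-null query: true boolean, then the query as a named writeable.
    out.writeOptionalQuery(someQueryBuilder);          // someQueryBuilder: hypothetical
    QueryBuilder<?> roundTripped = in.readOptionalQuery();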
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
index 6bb1c5653f..bd37f5ed47 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java
@@ -23,10 +23,7 @@ import java.io.IOException;
/**
* Implementers can be read from {@linkplain StreamInput} by calling their {@link #readFrom(StreamInput)} method.
*
- * It is common for implementers of this interface to declare a <code>public static final</code> instance of themselves named PROTOTYPE so
- * users can call {@linkplain #readFrom(StreamInput)} on it. It is also fairly typical for readFrom to be implemented as a method that just
- * calls a constructor that takes {@linkplain StreamInput} as a parameter. This allows the fields in the implementer to be
- * <code>final</code>.
+ * Implementers of this interface that also implement {@link Writeable} should see advice there on how to do so.
*/
public interface StreamableReader<T> {
/**
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
index 8f0cb3c96c..75c1f28c39 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/Writeable.java
@@ -31,21 +31,31 @@ import java.io.IOException;
*
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
* so this isn't always possible.
+ *
+ * The fact that this interface extends {@link StreamableReader} should be considered vestigial. Instead of using its
+ * {@link #readFrom(StreamInput)} method you should prefer using the Reader interface as a reference to a constructor that takes
+ * {@link StreamInput}. The reasoning behind this is that most "good" readFrom implementations just delegated to such a constructor anyway
+ * and they required an unsightly PROTOTYPE object.
*/
-public interface Writeable<T> extends StreamableReader<T> {
+public interface Writeable<T> extends StreamableReader<T> { // TODO remove extends StreamableReader<T> from this interface, and remove <T>
/**
* Write this into the {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
- @FunctionalInterface
- interface IOFunction<T, R> {
- /**
- * Applies this function to the given argument.
- *
- * @param t the function argument
- * @return the function result
- */
- R apply(T t) throws IOException;
- }
+ @Override
+ default T readFrom(StreamInput in) throws IOException {
+ // See class javadoc for reasoning
+ throw new UnsupportedOperationException("Prefer calling a constructor that takes a StreamInput to calling readFrom.");
+ }
+
+ /**
+ * Reference to a method that can read some object from a stream. By convention this is a constructor that takes
+ * {@linkplain StreamInput} as an argument for most classes and a static method for things like enums. Returning null from one of these
+ * is always wrong - for that we use methods like {@link StreamInput#readOptionalWriteable(Reader)}.
+ */
+ @FunctionalInterface
+ interface Reader<R> {
+ R read(StreamInput t) throws IOException;
+ }
}
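
The convention the new javadoc describes, as a sketch with a hypothetical class: deserialization happens in a constructor, so fields stay final and the constructor reference serves as the Reader:

    // Hypothetical Writeable following the constructor-as-Reader convention.
    public class WirePoint implements Writeable<WirePoint> {
        private final double x;
        private final double y;

        public WirePoint(double x, double y) {
            this.x = x;
            this.y = y;
        }

        /** Read from a stream, replacing the old PROTOTYPE.readFrom idiom. */
        public WirePoint(StreamInput in) throws IOException {
            this(in.readDouble(), in.readDouble());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeDouble(x);
            out.writeDouble(y);
        }
    }
    // Used wherever a Reader is needed: Writeable.Reader<WirePoint> reader = WirePoint::new;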
diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
index 7e4c1348f8..4c32abe815 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -28,6 +28,7 @@ import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -37,6 +38,7 @@ import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.rest.action.admin.cluster.allocation.RestClusterAllocationExplainAction;
import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
@@ -170,6 +172,7 @@ public class NetworkModule extends AbstractModule {
RestNodesInfoAction.class,
RestNodesStatsAction.class,
RestNodesHotThreadsAction.class,
+ RestClusterAllocationExplainAction.class,
RestClusterStatsAction.class,
RestClusterStateAction.class,
RestClusterHealthAction.class,
@@ -328,7 +331,7 @@ public class NetworkModule extends AbstractModule {
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
- registerTaskStatus(ReplicationTask.Status.PROTOTYPE);
+ registerTaskStatus(ReplicationTask.Status.NAME, ReplicationTask.Status::new);
if (transportClient == false) {
registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
@@ -374,8 +377,8 @@ public class NetworkModule extends AbstractModule {
}
}
- public void registerTaskStatus(Task.Status prototype) {
- namedWriteableRegistry.registerPrototype(Task.Status.class, prototype);
+ public void registerTaskStatus(String name, Writeable.Reader<? extends Task.Status> reader) {
+ namedWriteableRegistry.register(Task.Status.class, name, reader);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index 14e7958661..5f587cc270 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -344,9 +344,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
- ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
- ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
+ ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java
index f6f77192c7..02f7a5c37a 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/JsonSettingsLoader.java
@@ -27,6 +27,10 @@ import org.elasticsearch.common.xcontent.XContentType;
*/
public class JsonSettingsLoader extends XContentSettingsLoader {
+ public JsonSettingsLoader(boolean allowNullValues) {
+ super(allowNullValues);
+ }
+
@Override
public XContentType contentType() {
return XContentType.JSON;
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java
index 57c9419f5b..6ee1f58cf4 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoader.java
@@ -24,10 +24,12 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.stream.StreamInput;
+import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
+import java.util.function.Supplier;
/**
* Settings loader that loads (parses) the settings in a properties format.
@@ -36,42 +38,49 @@ public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(String source) throws IOException {
- Properties props = new NoDuplicatesProperties();
- FastStringReader reader = new FastStringReader(source);
- try {
- props.load(reader);
- Map<String, String> result = new HashMap<>();
- for (Map.Entry entry : props.entrySet()) {
- result.put((String) entry.getKey(), (String) entry.getValue());
- }
- return result;
- } finally {
- IOUtils.closeWhileHandlingException(reader);
- }
+ return load(() -> new FastStringReader(source), (reader, props) -> props.load(reader));
}
@Override
public Map<String, String> load(byte[] source) throws IOException {
- Properties props = new NoDuplicatesProperties();
- StreamInput stream = StreamInput.wrap(source);
+ return load(() -> StreamInput.wrap(source), (inStream, props) -> props.load(inStream));
+ }
+
+ private final <T extends Closeable> Map<String, String> load(
+ Supplier<T> supplier,
+ IOExceptionThrowingBiConsumer<T, Properties> properties
+ ) throws IOException {
+ T t = null;
try {
- props.load(stream);
- Map<String, String> result = new HashMap<>();
+ t = supplier.get();
+ final Properties props = new NoDuplicatesProperties();
+ properties.accept(t, props);
+ final Map<String, String> result = new HashMap<>();
for (Map.Entry entry : props.entrySet()) {
result.put((String) entry.getKey(), (String) entry.getValue());
}
return result;
} finally {
- IOUtils.closeWhileHandlingException(stream);
+ IOUtils.closeWhileHandlingException(t);
}
}
+ @FunctionalInterface
+ private interface IOExceptionThrowingBiConsumer<T, U> {
+ void accept(T t, U u) throws IOException;
+ }
+
class NoDuplicatesProperties extends Properties {
@Override
public synchronized Object put(Object key, Object value) {
- Object previousValue = super.put(key, value);
+ final Object previousValue = super.put(key, value);
if (previousValue != null) {
- throw new ElasticsearchParseException("duplicate settings key [{}] found, previous value [{}], current value [{}]", key, previousValue, value);
+ throw new ElasticsearchParseException(
+ "duplicate settings key [{}] found, previous value [{}], current value [{}]",
+ key,
+ previousValue,
+ value
+ );
}
return previousValue;
}
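
The helper introduced above collapses the two near-identical load variants into one method driven by a resource Supplier and an IOException-throwing consumer. A minimal, self-contained sketch of the same pattern using only JDK types (all names here are hypothetical, not from the patch):

    import java.io.Closeable;
    import java.io.IOException;
    import java.io.StringReader;
    import java.util.Properties;
    import java.util.function.Supplier;

    public class LoadPatternSketch {
        // Mirrors IOExceptionThrowingBiConsumer from the patch: a BiConsumer that may throw IOException.
        @FunctionalInterface
        interface IOBiConsumer<T, U> {
            void accept(T t, U u) throws IOException;
        }

        // One shared implementation: obtain the resource, let the consumer fill the
        // Properties from it, and always close the resource afterwards.
        static <T extends Closeable> Properties load(Supplier<T> supplier,
                                                     IOBiConsumer<T, Properties> loader) throws IOException {
            T resource = supplier.get();
            try {
                Properties props = new Properties();
                loader.accept(resource, props);
                return props;
            } finally {
                resource.close(); // the patch uses IOUtils.closeWhileHandlingException here
            }
        }

        public static void main(String[] args) throws IOException {
            // Same call shape as load(String) in the patch: a reader source plus Properties::load.
            Properties props = load(() -> new StringReader("a=b"), (reader, p) -> p.load(reader));
            System.out.println(props); // {a=b}
        }
    }
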
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java
index e55cb1092f..5bf9916ee0 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/SettingsLoaderFactory.java
@@ -20,43 +20,63 @@
package org.elasticsearch.common.settings.loader;
/**
- * A settings loader factory automatically trying to identify what type of
- * {@link SettingsLoader} to use.
- *
- *
+ * A class holding factory methods for settings loaders that attempts
+ * to infer the type of the underlying settings content.
*/
public final class SettingsLoaderFactory {
private SettingsLoaderFactory() {
-
}
/**
- * Returns a {@link SettingsLoader} based on the resource name.
+ * Returns a {@link SettingsLoader} based on the source resource
+ * name. This factory method assumes that if the resource name ends
+ * with ".json" then the content should be parsed as JSON, else if
+ * the resource name ends with ".yml" or ".yaml" then the content
+ * should be parsed as YAML, else if the resource name ends with
+ * ".properties" then the content should be parsed as properties,
+ * otherwise it defaults to attempting to parse as JSON. Note that the
+ * parsers returned by this method will not accept null-valued
+ * keys.
+ *
+ * @param resourceName The resource name containing the settings
+ * content.
+ * @return A settings loader.
*/
public static SettingsLoader loaderFromResource(String resourceName) {
if (resourceName.endsWith(".json")) {
- return new JsonSettingsLoader();
+ return new JsonSettingsLoader(false);
} else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) {
- return new YamlSettingsLoader();
+ return new YamlSettingsLoader(false);
} else if (resourceName.endsWith(".properties")) {
return new PropertiesSettingsLoader();
} else {
// lets default to the json one
- return new JsonSettingsLoader();
+ return new JsonSettingsLoader(false);
}
}
/**
- * Returns a {@link SettingsLoader} based on the actual settings source.
+ * Returns a {@link SettingsLoader} based on the source content.
+ * This factory method assumes that if the underlying content
+ * contains an opening and closing brace ('{' and '}') then the
+ * content should be parsed as JSON, else if the underlying content
+ * fails this condition but contains a ':' then the content should
+ * be parsed as YAML, and otherwise should be parsed as properties.
+ * Note that the JSON and YAML parsers returned by this method will
+ * accept null-valued keys.
+ *
+ * @param source The underlying settings content.
+ * @return A settings loader.
*/
public static SettingsLoader loaderFromSource(String source) {
if (source.indexOf('{') != -1 && source.indexOf('}') != -1) {
- return new JsonSettingsLoader();
+ return new JsonSettingsLoader(true);
}
if (source.indexOf(':') != -1) {
- return new YamlSettingsLoader();
+ return new YamlSettingsLoader(true);
}
return new PropertiesSettingsLoader();
}
+
}
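
The content-sniffing order in loaderFromSource matters: a JSON document almost always contains ':' as well, so the brace check has to run before the colon check. A standalone sketch of the heuristic (Format is a hypothetical stand-in for the concrete loader types):

    enum Format { JSON, YAML, PROPERTIES }

    static Format sniff(String source) {
        if (source.indexOf('{') != -1 && source.indexOf('}') != -1) {
            return Format.JSON;   // braces win, even though a ':' is usually present too
        }
        if (source.indexOf(':') != -1) {
            return Format.YAML;   // no braces, but "key: value" syntax
        }
        return Format.PROPERTIES; // fall back to "key=value" properties
    }
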
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
index 9c2f973b96..3875c1ef85 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
@@ -38,6 +38,12 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
public abstract XContentType contentType();
+ private final boolean allowNullValues;
+
+ XContentSettingsLoader(boolean allowNullValues) {
+ this.allowNullValues = allowNullValues;
+ }
+
@Override
public Map<String, String> load(String source) throws IOException {
try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(source)) {
@@ -153,6 +159,16 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
currentValue
);
}
+
+ if (currentValue == null && !allowNullValues) {
+ throw new ElasticsearchParseException(
+ "null-valued setting found for key [{}] found at line number [{}], column number [{}]",
+ key,
+ parser.getTokenLocation().lineNumber,
+ parser.getTokenLocation().columnNumber
+ );
+ }
+
settings.put(key, currentValue);
}
}
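
With allowNullValues threaded through, the two factory methods now differ in how they treat null values. A short usage sketch, following directly from the constructor arguments chosen in SettingsLoaderFactory above (exception message paraphrased from the patch):

    // loaderFromSource builds its parser with allowNullValues = true, so this parses:
    Map<String, String> settings =
        SettingsLoaderFactory.loaderFromSource("{\"key\": null}").load("{\"key\": null}");

    // loaderFromResource builds its parser with allowNullValues = false, so the same
    // content should throw: "null-valued setting found for key [key] ..."
    SettingsLoaderFactory.loaderFromResource("settings.json").load("{\"key\": null}");
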
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java
index 248fe090b5..12cde97669 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/YamlSettingsLoader.java
@@ -30,6 +30,10 @@ import java.util.Map;
*/
public class YamlSettingsLoader extends XContentSettingsLoader {
+ public YamlSettingsLoader(boolean allowNullValues) {
+ super(allowNullValues);
+ }
+
@Override
public XContentType contentType() {
return XContentType.YAML;
diff --git a/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java
index d0e91646c0..b34c1101f9 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java
@@ -211,34 +211,6 @@ public enum DistanceUnit implements Writeable<DistanceUnit> {
}
/**
- * Write a {@link DistanceUnit} to a {@link StreamOutput}
- *
- * @param out {@link StreamOutput} to write to
- * @param unit {@link DistanceUnit} to write
- */
- public static void writeDistanceUnit(StreamOutput out, DistanceUnit unit) throws IOException {
- out.writeByte((byte) unit.ordinal());
- }
-
- /**
- * Read a {@link DistanceUnit} from a {@link StreamInput}
- *
- * @param in {@link StreamInput} to read the {@link DistanceUnit} from
- * @return {@link DistanceUnit} read from the {@link StreamInput}
- * @throws IOException if no unit can be read from the {@link StreamInput}
- * @throws IllegalArgumentException if no matching {@link DistanceUnit} can be found
- */
- public static DistanceUnit readDistanceUnit(StreamInput in) throws IOException {
- byte b = in.readByte();
-
- if(b<0 || b>=values().length) {
- throw new IllegalArgumentException("No type for distance unit matching [" + b + "]");
- } else {
- return values()[b];
- }
- }
-
- /**
* This class implements a value+unit tuple.
*/
public static class Distance implements Comparable<Distance> {
@@ -324,23 +296,30 @@ public enum DistanceUnit implements Writeable<DistanceUnit> {
}
}
- private static final DistanceUnit PROTOTYPE = DEFAULT;
+ /**
+ * Read a {@link DistanceUnit} from a {@link StreamInput}.
+ *
+ * @param in {@link StreamInput} to read the {@link DistanceUnit} from
+ * @return {@link DistanceUnit} read from the {@link StreamInput}
+ * @throws IOException if no unit can be read from the {@link StreamInput}
+ * @throws IllegalArgumentException if no matching {@link DistanceUnit} can be found
+ */
+ public static DistanceUnit readFromStream(StreamInput in) throws IOException {
+ byte b = in.readByte();
- @Override
- public DistanceUnit readFrom(StreamInput in) throws IOException {
- int ordinal = in.readVInt();
- if (ordinal < 0 || ordinal >= values().length) {
- throw new IOException("Unknown DistanceUnit ordinal [" + ordinal + "]");
+ if (b < 0 || b >= values().length) {
+ throw new IllegalArgumentException("No type for distance unit matching [" + b + "]");
}
- return values()[ordinal];
- }
-
- public static DistanceUnit readUnitFrom(StreamInput in) throws IOException {
- return PROTOTYPE.readFrom(in);
+ return values()[b];
}
+ /**
+ * Write a {@link DistanceUnit} to a {@link StreamOutput}.
+ *
+ * @param out {@link StreamOutput} to write to
+ */
@Override
public void writeTo(StreamOutput out) throws IOException {
- out.writeVInt(this.ordinal());
+ out.writeByte((byte) this.ordinal());
}
}
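
The wire format change returns to writing the ordinal as a single byte, matching the removed static helpers, instead of a vInt. A round-trip sketch, assuming the BytesStreamOutput/StreamInput pairing used elsewhere in this codebase (out.bytes().toBytes() is assumed here as the byte[] accessor):

    BytesStreamOutput out = new BytesStreamOutput();
    DistanceUnit.KILOMETERS.writeTo(out);                // writes one byte: the ordinal

    StreamInput in = StreamInput.wrap(out.bytes().toBytes());
    DistanceUnit unit = DistanceUnit.readFromStream(in); // range-checks, then values()[b]
    assert unit == DistanceUnit.KILOMETERS;

Since the format encodes enum ordinals, reordering or removing DistanceUnit constants would break wire compatibility; appending new constants at the end is the compatible way to extend it.
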
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index 07cd3853cb..6e0f17812c 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -71,6 +71,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@@ -78,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.Collectors;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
@@ -100,14 +102,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING =
Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
- public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING =
- Setting.boolSetting("discovery.zen.master_election.filter_client", true, Property.NodeScope);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
Property.NodeScope);
- public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING =
- Setting.boolSetting("discovery.zen.master_election.filter_data", false, Property.NodeScope);
+ public final static Setting<Boolean> MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING =
+ Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@@ -138,8 +138,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private final ElectMasterService electMaster;
- private final boolean masterElectionFilterClientNodes;
- private final boolean masterElectionFilterDataNodes;
+ private final boolean masterElectionIgnoreNonMasters;
private final TimeValue masterElectionWaitForJoinsTimeout;
private final JoinThreadControl joinThreadControl;
@@ -169,11 +168,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings);
this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings);
- this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
- this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
+ this.masterElectionIgnoreNonMasters = MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
- logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
+ logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.ignore_non_master [{}]",
+ this.pingTimeout, joinTimeout, masterElectionIgnoreNonMasters);
clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
final ClusterState clusterState = clusterService.state();
@@ -846,30 +845,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
// filter responses
- List<ZenPing.PingResponse> pingResponses = new ArrayList<>();
- for (ZenPing.PingResponse pingResponse : fullPingResponses) {
- DiscoveryNode node = pingResponse.node();
- if (masterElectionFilterClientNodes && (node.clientNode() || (!node.masterNode() && !node.dataNode()))) {
- // filter out the client node, which is a client node, or also one that is not data and not master (effectively, client)
- } else if (masterElectionFilterDataNodes && (!node.masterNode() && node.dataNode())) {
- // filter out data node that is not also master
- } else {
- pingResponses.add(pingResponse);
- }
- }
-
- if (logger.isDebugEnabled()) {
- StringBuilder sb = new StringBuilder();
- if (pingResponses.isEmpty()) {
- sb.append(" {none}");
- } else {
- for (ZenPing.PingResponse pingResponse : pingResponses) {
- sb.append("\n\t--> ").append(pingResponse);
- }
- }
- logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
- masterElectionFilterDataNodes, sb);
- }
+ final List<ZenPing.PingResponse> pingResponses;
+ pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
final DiscoveryNode localNode = clusterService.localNode();
List<DiscoveryNode> pingMasters = new ArrayList<>();
@@ -925,6 +902,28 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
+ static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, ESLogger logger) {
+ List<ZenPing.PingResponse> pingResponses;
+ if (masterElectionIgnoreNonMasters) {
+ pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
+ } else {
+ pingResponses = Arrays.asList(fullPingResponses);
+ }
+
+ if (logger.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ if (pingResponses.isEmpty()) {
+ sb.append(" {none}");
+ } else {
+ for (ZenPing.PingResponse pingResponse : pingResponses) {
+ sb.append("\n\t--> ").append(pingResponse);
+ }
+ }
+ logger.debug("filtered ping responses: (ignore_non_masters [{}]){}", masterElectionIgnoreNonMasters, sb);
+ }
+ return pingResponses;
+ }
+
protected ClusterState rejoin(ClusterState clusterState, String reason) {
// *** called from within an cluster state update task *** //
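
The two filter_client/filter_data switches collapse into a single setting. For reference, the replacement would be enabled in elasticsearch.yml with a line like:

    discovery.zen.master_election.ignore_non_master_pings: true

When true, filterPingResponses keeps only pings from master-eligible nodes; when false (the default), all ping responses are considered during master election.
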
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index b2c218ae10..815f257a45 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -19,18 +19,6 @@
package org.elasticsearch.index;
-import java.io.Closeable;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
@@ -82,6 +70,18 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.threadpool.ThreadPool;
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
@@ -621,6 +621,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
rescheduleFsyncTask(durability);
}
}
+
+ // update primary terms
+ for (final IndexShard shard : this.shards.values()) {
+ shard.updatePrimaryTerm(metadata.primaryTerm(shard.shardId().id()));
+ }
}
private void rescheduleFsyncTask(Translog.Durability durability) {
@@ -780,7 +785,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
@Override
- public void close() {
+ public synchronized void close() {
if (closed.compareAndSet(false, true)) {
FutureUtils.cancel(scheduledFuture);
scheduledFuture = null;
diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
index c66073bd91..5b6d27ce24 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -927,82 +927,6 @@ public abstract class Engine implements Closeable {
}
}
- public static class DeleteByQuery {
- private final Query query;
- private final BytesReference source;
- private final String[] filteringAliases;
- private final Query aliasFilter;
- private final String[] types;
- private final BitSetProducer parentFilter;
- private final Operation.Origin origin;
-
- private final long startTime;
- private long endTime;
-
- public DeleteByQuery(Query query, BytesReference source, @Nullable String[] filteringAliases, @Nullable Query aliasFilter, BitSetProducer parentFilter, Operation.Origin origin, long startTime, String... types) {
- this.query = query;
- this.source = source;
- this.types = types;
- this.filteringAliases = filteringAliases;
- this.aliasFilter = aliasFilter;
- this.parentFilter = parentFilter;
- this.startTime = startTime;
- this.origin = origin;
- }
-
- public Query query() {
- return this.query;
- }
-
- public BytesReference source() {
- return this.source;
- }
-
- public String[] types() {
- return this.types;
- }
-
- public String[] filteringAliases() {
- return filteringAliases;
- }
-
- public Query aliasFilter() {
- return aliasFilter;
- }
-
- public boolean nested() {
- return parentFilter != null;
- }
-
- public BitSetProducer parentFilter() {
- return parentFilter;
- }
-
- public Operation.Origin origin() {
- return this.origin;
- }
-
- /**
- * Returns operation start time in nanoseconds.
- */
- public long startTime() {
- return this.startTime;
- }
-
- public DeleteByQuery endTime(long endTime) {
- this.endTime = endTime;
- return this;
- }
-
- /**
- * Returns operation end time in nanoseconds.
- */
- public long endTime() {
- return this.endTime;
- }
- }
-
-
public static class Get {
private final boolean realtime;
private final Term uid;
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index 0c9f2daa6c..0f9c31d75d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -614,7 +614,8 @@ final class DocumentParser implements Closeable {
} else if (fieldType instanceof TextFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "text", "string");
if (builder == null) {
- builder = new TextFieldMapper.Builder(currentFieldName);
+ builder = new TextFieldMapper.Builder(currentFieldName)
+ .addMultiField(new KeywordFieldMapper.Builder("keyword").ignoreAbove(256));
}
} else if (fieldType instanceof KeywordFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "keyword", "string");
@@ -714,7 +715,8 @@ final class DocumentParser implements Closeable {
}
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string");
if (builder == null) {
- builder = new TextFieldMapper.Builder(currentFieldName);
+ builder = new TextFieldMapper.Builder(currentFieldName)
+ .addMultiField(new KeywordFieldMapper.Builder("keyword").ignoreAbove(256));
}
return builder;
} else if (token == XContentParser.Token.VALUE_NUMBER) {
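
With this change, a dynamically mapped string field gains a keyword sub-field capped at 256 characters. The resulting mapping is equivalent to something like the following JSON sketch (field name hypothetical):

    "my_field": {
      "type": "text",
      "fields": {
        "keyword": {
          "type": "keyword",
          "ignore_above": 256
        }
      }
    }

Full-text queries run against my_field, while exact matching, sorting, and aggregations can use my_field.keyword.
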
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index eaf897e7fb..73b94e60b4 100755
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -283,6 +283,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
if (reason == MergeReason.MAPPING_UPDATE) {
+ // this check will only be performed on the master node when there is
+ // a call to the update mapping API. For all other cases like
+ // the master node restoring mappings from disk or data nodes
+ // deserializing cluster state that was sent by the master node,
+ // this check will be skipped.
checkNestedFieldsLimit(fullPathObjectMappers);
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
index b9e3434fc2..e2b690caca 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
@@ -22,15 +22,16 @@ package org.elasticsearch.index.mapper.internal;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -39,12 +40,12 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.query.QueryShardContext;
import java.io.IOException;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
/**
*
@@ -133,12 +134,55 @@ public class TypeFieldMapper extends MetadataFieldMapper {
@Override
public Query termQuery(Object value, @Nullable QueryShardContext context) {
if (indexOptions() == IndexOptions.NONE) {
- return new ConstantScoreQuery(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value)))));
+ throw new AssertionError();
}
- return new ConstantScoreQuery(new TermQuery(createTerm(value)));
+ return new TypeQuery(indexedValueForSearch(value));
}
}
+ public static class TypeQuery extends Query {
+
+ private final BytesRef type;
+
+ public TypeQuery(BytesRef type) {
+ this.type = Objects.requireNonNull(type);
+ }
+
+ @Override
+ public Query rewrite(IndexReader reader) throws IOException {
+ Term term = new Term(CONTENT_TYPE, type);
+ TermContext context = TermContext.build(reader.getContext(), term);
+ if (context.docFreq() == reader.maxDoc()) {
+ // All docs have the same type.
+ // Using a match_all query will help Lucene perform some optimizations
+ // For instance, match_all queries as filter clauses are automatically removed
+ return new MatchAllDocsQuery();
+ } else {
+ return new ConstantScoreQuery(new TermQuery(term, context));
+ }
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (super.equals(obj) == false) {
+ return false;
+ }
+ TypeQuery that = (TypeQuery) obj;
+ return type.equals(that.type);
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 * super.hashCode() + type.hashCode();
+ }
+
+ @Override
+ public String toString(String field) {
+ return "_type:" + type;
+ }
+
+ }
+
private TypeFieldMapper(Settings indexSettings, MappedFieldType existing) {
this(existing == null ? defaultFieldType(indexSettings) : existing.clone(),
indexSettings);
diff --git a/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java
index a8669c98cd..3218837261 100644
--- a/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java
+++ b/core/src/main/java/org/elasticsearch/index/percolator/ExtractQueryTermsService.java
@@ -27,6 +27,8 @@ import org.apache.lucene.index.PrefixCodedTerms;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.queries.BlendedTermQuery;
+import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -162,6 +164,12 @@ public final class ExtractQueryTermsService {
} else if (query instanceof BoostQuery) {
Query wrappedQuery = ((BoostQuery) query).getQuery();
return extractQueryTerms(wrappedQuery);
+ } else if (query instanceof CommonTermsQuery) {
+ List<Term> terms = ((CommonTermsQuery) query).getTerms();
+ return new HashSet<>(terms);
+ } else if (query instanceof BlendedTermQuery) {
+ List<Term> terms = ((BlendedTermQuery) query).getTerms();
+ return new HashSet<>(terms);
} else {
throw new UnsupportedQueryException(query);
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
index 975736e842..12fd0e1a9b 100644
--- a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.query;
import org.apache.lucene.index.Term;
+import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
@@ -74,15 +75,14 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
- Query filter;
//LUCENE 4 UPGRADE document mapper should use bytesref as well?
DocumentMapper documentMapper = context.getMapperService().documentMapper(type.utf8ToString());
if (documentMapper == null) {
- filter = new TermQuery(new Term(TypeFieldMapper.NAME, type));
+ // no type means no documents
+ return new MatchNoDocsQuery();
} else {
- filter = documentMapper.typeFilter();
+ return documentMapper.typeFilter();
}
- return filter;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java
index 626e72acf4..e632c0669f 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IllegalIndexShardStateException.java
@@ -33,12 +33,12 @@ public class IllegalIndexShardStateException extends ElasticsearchException {
private final IndexShardState currentState;
- public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg) {
- this(shardId, currentState, msg, null);
+ public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Object... args) {
+ this(shardId, currentState, msg, null, args);
}
- public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex) {
- super("CurrentState[" + currentState + "] " + msg, ex);
+ public IllegalIndexShardStateException(ShardId shardId, IndexShardState currentState, String msg, Throwable ex, Object... args) {
+ super("CurrentState[" + currentState + "] " + msg, ex, args);
setShard(shardId);
this.currentState = currentState;
}
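
The added Object... args parameter lets callers use the same {}-placeholder style as the logging infrastructure instead of concatenating strings. A hypothetical call site (message and arguments are illustrative, not taken from the patch):

    throw new IllegalIndexShardStateException(shardId, state,
        "operation only allowed when started, origin [{}]", origin);
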
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index b667a1de68..5a764a1207 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -41,6 +41,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
@@ -144,13 +145,16 @@ public class IndexShard extends AbstractIndexShardComponent {
private final TranslogConfig translogConfig;
private final IndexEventListener indexEventListener;
- /** How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
- * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
- * being indexed/deleted. */
+ /**
+ * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
+ * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
+ * being indexed/deleted.
+ */
private final AtomicLong writingBytes = new AtomicLong();
protected volatile ShardRouting shardRouting;
protected volatile IndexShardState state;
+ protected volatile long primaryTerm;
protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
protected final EngineFactory engineFactory;
@@ -236,13 +240,16 @@ public class IndexShard extends AbstractIndexShardComponent {
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.suspendableRefContainer = new SuspendableRefContainer();
this.searcherWrapper = indexSearcherWrapper;
+ this.primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
}
public Store store() {
return this.store;
}
- /** returns true if this shard supports indexing (i.e., write) operations. */
+ /**
+ * returns true if this shard supports indexing (i.e., write) operations.
+ */
public boolean canIndex() {
return true;
}
@@ -279,6 +286,30 @@ public class IndexShard extends AbstractIndexShardComponent {
return this.shardFieldData;
}
+
+ /**
+ * Returns the primary term the index shard is on. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
+ */
+ public long getPrimaryTerm() {
+ return this.primaryTerm;
+ }
+
+ /**
+ * notifies the shard of an increase in the primary term
+ */
+ public void updatePrimaryTerm(final long newTerm) {
+ synchronized (mutex) {
+ if (newTerm != primaryTerm) {
+ assert shardRouting.primary() == false : "a primary shard should never update its term. shard: " + shardRouting
+ + " current term [" + primaryTerm + "] new term [" + newTerm + "]";
+ assert newTerm > primaryTerm : "primary terms can only go up. current [" + primaryTerm + "], new [" + newTerm + "]";
+ primaryTerm = newTerm;
+ }
+ }
+ }
+
/**
* Returns the latest cluster routing entry received with this shard. Might be null if the
* shard was just created.
@@ -297,12 +328,12 @@ public class IndexShard extends AbstractIndexShardComponent {
* unless explicitly disabled.
*
* @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
- * @throws IOException if shard state could not be persisted
+ * @throws IOException if shard state could not be persisted
*/
public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) throws IOException {
final ShardRouting currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
- throw new IllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
+ throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");
}
if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
@@ -419,9 +450,7 @@ public class IndexShard extends AbstractIndexShardComponent {
public Engine.Index prepareIndexOnPrimary(SourceToParse source, long version, VersionType versionType) {
try {
- if (shardRouting.primary() == false) {
- throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
- }
+ verifyPrimary();
return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.PRIMARY);
} catch (Throwable t) {
verifyNotClosed(t);
@@ -431,6 +460,7 @@ public class IndexShard extends AbstractIndexShardComponent {
public Engine.Index prepareIndexOnReplica(SourceToParse source, long version, VersionType versionType) {
try {
+ verifyReplicationTarget();
return prepareIndex(docMapper(source.type()), source, version, versionType, Engine.Operation.Origin.REPLICA);
} catch (Throwable t) {
verifyNotClosed(t);
@@ -474,9 +504,7 @@ public class IndexShard extends AbstractIndexShardComponent {
}
public Engine.Delete prepareDeleteOnPrimary(String type, String id, long version, VersionType versionType) {
- if (shardRouting.primary() == false) {
- throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
- }
+ verifyPrimary();
final DocumentMapper documentMapper = docMapper(type).getDocumentMapper();
return prepareDelete(type, id, documentMapper.uidMapper().term(Uid.createUid(type, id)), version, versionType, Engine.Operation.Origin.PRIMARY);
}
@@ -515,7 +543,9 @@ public class IndexShard extends AbstractIndexShardComponent {
return getEngine().get(get, this::acquireSearcher);
}
- /** Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link EngineClosedException}. */
+ /**
+ * Writes all indexing changes to disk and opens a new searcher reflecting all changes. This can throw {@link EngineClosedException}.
+ */
public void refresh(String source) {
verifyNotClosed();
if (canIndex()) {
@@ -538,7 +568,9 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
- /** Returns how many bytes we are currently moving from heap to disk */
+ /**
+ * Returns how many bytes we are currently moving from heap to disk
+ */
public long getWritingBytes() {
return writingBytes.get();
}
@@ -940,6 +972,22 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
+ private void verifyPrimary() {
+ if (shardRouting.primary() == false) {
+ // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException
+ throw new IllegalStateException("shard is not a primary " + shardRouting);
+ }
+ }
+
+ private void verifyReplicationTarget() {
+ final IndexShardState state = state();
+ if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) {
+ // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException
+ throw new IllegalStateException("active primary shard cannot be a replication target before " +
+ " relocation hand off " + shardRouting + ", state is [" + state + "]");
+ }
+ }
+
protected final void verifyStartedOrRecovering() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state != IndexShardState.STARTED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
@@ -969,7 +1017,9 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
- /** Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed */
+ /**
+ * Returns number of heap bytes used by the indexing buffer for this shard, or 0 if the shard is closed
+ */
public long getIndexBufferRAMBytesUsed() {
Engine engine = getEngineOrNull();
if (engine == null) {
@@ -986,8 +1036,10 @@ public class IndexShard extends AbstractIndexShardComponent {
this.shardEventListener.delegates.add(onShardFailure);
}
- /** Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
- * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen. */
+ /**
+ * Called by {@link IndexingMemoryController} to check whether more than {@code inactiveTimeNS} has passed since the last
+ * indexing operation, and notify listeners that we are now inactive so e.g. sync'd flush can happen.
+ */
public void checkIdle(long inactiveTimeNS) {
Engine engineOrNull = getEngineOrNull();
if (engineOrNull != null && System.nanoTime() - engineOrNull.getLastWriteNanos() >= inactiveTimeNS) {
@@ -1132,11 +1184,12 @@ public class IndexShard extends AbstractIndexShardComponent {
}
} catch (Exception e) {
handleRefreshException(e);
- };
+ }
}
/**
* Should be called for each no-op update operation to increment relevant statistics.
+ *
* @param type the doc type of the update
*/
public void noopUpdate(String type) {
@@ -1336,14 +1389,22 @@ public class IndexShard extends AbstractIndexShardComponent {
public Releasable acquirePrimaryOperationLock() {
verifyNotClosed();
- if (shardRouting.primary() == false) {
- throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
- }
+ verifyPrimary();
return suspendableRefContainer.acquireUninterruptibly();
}
- public Releasable acquireReplicaOperationLock() {
+ /**
+ * Acquires an operation lock on a replica shard. If the given primary term is lower than the
+ * current primary term known to this shard, an {@link IllegalArgumentException} is thrown.
+ */
+ public Releasable acquireReplicaOperationLock(long opPrimaryTerm) {
verifyNotClosed();
+ verifyReplicationTarget();
+ if (primaryTerm > opPrimaryTerm) {
+ // must use exception that is not ignored by replication logic. See TransportActions.isShardNotAvailableException
+ throw new IllegalArgumentException(LoggerMessageFormat.format("{} operation term [{}] is too old (current [{}])",
+ shardId, opPrimaryTerm, primaryTerm));
+ }
return suspendableRefContainer.acquireUninterruptibly();
}
@@ -1447,7 +1508,7 @@ public class IndexShard extends AbstractIndexShardComponent {
* Returns <code>true</code> iff one or more changes to the engine are not visible to via the current searcher.
* Otherwise <code>false</code>.
*
- * @throws EngineClosedException if the engine is already closed
+ * @throws EngineClosedException if the engine is already closed
* @throws AlreadyClosedException if the internal indexwriter in the engine is already closed
*/
public boolean isRefreshNeeded() {
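
acquireReplicaOperationLock now takes the primary term under which the operation was issued, so operations from a stale primary are rejected before touching the engine. A hedged sketch of a replica-side caller; request.primaryTerm() is an assumed accessor on the replication request:

    long opPrimaryTerm = request.primaryTerm(); // hypothetical: term carried on the request
    // Releasable.close() does not throw, so try-with-resources releases the lock cleanly.
    try (Releasable ignored = indexShard.acquireReplicaOperationLock(opPrimaryTerm)) {
        // apply the replicated write under the lock; an IllegalArgumentException above
        // means this shard has already advanced to a newer primary term
    }
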
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index 46ead3fbf3..82f1466bf1 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -19,7 +19,6 @@
package org.elasticsearch.indices.cluster;
-import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
@@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RestoreSource;
+import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -71,9 +71,11 @@ import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.ConcurrentMap;
/**
@@ -90,7 +92,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private final NodeMappingRefreshAction nodeMappingRefreshAction;
private final NodeServicesProvider nodeServicesProvider;
- private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {};
+ private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {
+ };
// a list of shards that failed during recovery
// we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
@@ -174,41 +177,44 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
cleanFailedShards(event);
+ // cleaning up indices that are completely deleted so we won't need to worry about them
+ // when checking for shards
applyDeletedIndices(event);
+ applyDeletedShards(event);
+ // call after deleted shards so indices with no shards will be cleaned
+ applyCleanedIndices(event);
+ // make sure that newly created shards use the latest meta data
+ applyIndexMetaData(event);
applyNewIndices(event);
+ // applyMappings also updates new indices. TODO: make new indices good to begin with
applyMappings(event);
applyNewOrUpdatedShards(event);
- applyDeletedShards(event);
- applyCleanedIndices(event);
- applySettings(event);
}
}
- private void applyCleanedIndices(final ClusterChangedEvent event) {
- // handle closed indices, since they are not allocated on a node once they are closed
- // so applyDeletedIndices might not take them into account
- for (IndexService indexService : indicesService) {
- Index index = indexService.index();
- IndexMetaData indexMetaData = event.state().metaData().index(index);
- if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
- for (Integer shardId : indexService.shardIds()) {
- logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
- try {
- indexService.removeShard(shardId, "removing shard (index is closed)");
- } catch (Throwable e) {
- logger.warn("{} failed to remove shard (index is closed)", e, index);
- }
- }
- }
+ private void cleanFailedShards(final ClusterChangedEvent event) {
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ failedShards.clear();
+ return;
}
- for (IndexService indexService : indicesService) {
- Index index = indexService.index();
- if (indexService.shardIds().isEmpty()) {
- if (logger.isDebugEnabled()) {
- logger.debug("{} cleaning index (no shards allocated)", index);
- }
- // clean the index
- removeIndex(index, "removing index (no shards allocated)");
+ RoutingTable routingTable = event.state().routingTable();
+ for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
+ Map.Entry<ShardId, ShardRouting> entry = iterator.next();
+ ShardId failedShardId = entry.getKey();
+ ShardRouting failedShardRouting = entry.getValue();
+ IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
+ if (indexRoutingTable == null) {
+ iterator.remove();
+ continue;
+ }
+ IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
+ if (shardRoutingTable == null) {
+ iterator.remove();
+ continue;
+ }
+ if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
+ iterator.remove();
}
}
}
@@ -218,16 +224,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final String localNodeId = event.state().nodes().localNodeId();
assert localNodeId != null;
- for (IndexService indexService : indicesService) {
- IndexMetaData indexMetaData = event.state().metaData().index(indexService.index().getName());
- if (indexMetaData != null) {
- if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
- logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.getIndex());
- deleteIndex(indexMetaData.getIndex(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
- }
- }
- }
-
for (Index index : event.indicesDeleted()) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index, no longer part of the metadata", index);
@@ -249,7 +245,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
-
+ for (IndexService indexService : indicesService) {
+ IndexMetaData indexMetaData = event.state().metaData().index(indexService.index());
+ if (indexMetaData == null) {
+ assert false : "index" + indexService.index() + " exists locally, doesn't have a metadata but is not part "
+ + " of the delete index list. \nprevious state: " + event.previousState().prettyPrint()
+ + "\n current state:\n" + event.state().prettyPrint();
+ logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing",
+ indexService.index());
+ deleteIndex(indexService.index(), "isn't part of metadata (explicit check)");
+ }
+ }
}
private void applyDeletedShards(final ClusterChangedEvent event) {
@@ -257,62 +263,81 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (routingNode == null) {
return;
}
- IntHashSet newShardIds = new IntHashSet();
+ Set<String> newShardAllocationIds = new HashSet<>();
for (IndexService indexService : indicesService) {
Index index = indexService.index();
- IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(index);
- if (indexMetaData == null) {
- continue;
- }
+ IndexMetaData indexMetaData = event.state().metaData().index(index);
+ assert indexMetaData != null : "local index doesn't have metadata, should have been cleaned up by applyDeletedIndices: " + index;
// now, go over and delete shards that needs to get deleted
- newShardIds.clear();
+ newShardAllocationIds.clear();
for (ShardRouting shard : routingNode) {
if (shard.index().equals(index)) {
- newShardIds.add(shard.id());
+ // use the allocation id and not the object so we won't be influenced by relocation targets
+ newShardAllocationIds.add(shard.allocationId().getId());
}
}
- for (Integer existingShardId : indexService.shardIds()) {
- if (!newShardIds.contains(existingShardId)) {
+ for (IndexShard existingShard : indexService) {
+ if (newShardAllocationIds.contains(existingShard.routingEntry().allocationId().getId()) == false) {
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
if (logger.isDebugEnabled()) {
- logger.debug("{}[{}] removing shard (index is closed)", index, existingShardId);
+ logger.debug("{} removing shard (index is closed)", existingShard.shardId());
}
- indexService.removeShard(existingShardId, "removing shard (index is closed)");
+ indexService.removeShard(existingShard.shardId().id(), "removing shard (index is closed)");
} else {
// we can just remove the shard, without cleaning it locally, since we will clean it
// when all shards are allocated in the IndicesStore
if (logger.isDebugEnabled()) {
- logger.debug("{}[{}] removing shard (not allocated)", index, existingShardId);
+ logger.debug("{} removing shard (not allocated)", existingShard.shardId());
}
- indexService.removeShard(existingShardId, "removing shard (not allocated)");
+ indexService.removeShard(existingShard.shardId().id(), "removing shard (not allocated)");
}
}
}
}
}
- private void applyNewIndices(final ClusterChangedEvent event) {
- // we only create indices for shards that are allocated
- RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
- if (routingNode == null) {
- return;
+ private void applyCleanedIndices(final ClusterChangedEvent event) {
+ // handle closed indices, since they are not allocated on a node once they are closed
+ // so applyDeletedIndices might not take them into account
+ for (IndexService indexService : indicesService) {
+ Index index = indexService.index();
+ IndexMetaData indexMetaData = event.state().metaData().index(index);
+ if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
+ for (Integer shardId : indexService.shardIds()) {
+ logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
+ try {
+ indexService.removeShard(shardId, "removing shard (index is closed)");
+ } catch (Throwable e) {
+ logger.warn("{} failed to remove shard (index is closed)", e, index);
+ }
+ }
+ }
}
- for (ShardRouting shard : routingNode) {
- if (!indicesService.hasIndex(shard.index())) {
- final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
+
+ final Set<Index> hasAllocations = new HashSet<>();
+ final RoutingNode node = event.state().getRoutingNodes().node(event.state().nodes().localNodeId());
+ // if no shards are allocated, i.e. if this node is a master-only node, it can return null
+ if (node != null) {
+ for (ShardRouting routing : node) {
+ hasAllocations.add(routing.index());
+ }
+ }
+ for (IndexService indexService : indicesService) {
+ Index index = indexService.index();
+ if (hasAllocations.contains(index) == false) {
+ assert indexService.shardIds().isEmpty() :
+ "no locally assigned shards, but index wasn't emptied by applyDeletedShards."
+ + " index " + index + ", shards: " + indexService.shardIds();
if (logger.isDebugEnabled()) {
- logger.debug("[{}] creating index", indexMetaData.getIndex());
- }
- try {
- indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
- } catch (Throwable e) {
- sendFailShard(shard, "failed to create index", e);
+ logger.debug("{} cleaning index (no shards allocated)", index);
}
+ // clean the index
+ removeIndex(index, "removing index (no shards allocated)");
}
}
}
- private void applySettings(ClusterChangedEvent event) {
+ private void applyIndexMetaData(ClusterChangedEvent event) {
if (!event.metaDataChanged()) {
return;
}
@@ -335,6 +360,26 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
}
+ private void applyNewIndices(final ClusterChangedEvent event) {
+ // we only create indices for shards that are allocated
+ RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
+ if (routingNode == null) {
+ return;
+ }
+ for (ShardRouting shard : routingNode) {
+ if (!indicesService.hasIndex(shard.index())) {
+ final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] creating index", indexMetaData.getIndex());
+ }
+ try {
+ indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
+ } catch (Throwable e) {
+ sendFailShard(shard, "failed to create index", e);
+ }
+ }
+ }
+ }
private void applyMappings(ClusterChangedEvent event) {
// go over and update mappings
@@ -361,8 +406,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
if (requireRefresh && sendRefreshMapping) {
nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
- new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(),
- event.state().nodes().localNodeId())
+ new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(),
+ event.state().nodes().localNodeId())
);
}
} catch (Throwable t) {
@@ -426,14 +471,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
for (final ShardRouting shardRouting : routingNode) {
final IndexService indexService = indicesService.indexService(shardRouting.index());
if (indexService == null) {
- // got deleted on us, ignore
+ // creation failed for some reason
+ assert failedShards.containsKey(shardRouting.shardId()) :
+ "index has local allocation but wasn't created by applyNewIndices and isn't marked as failed " + shardRouting;
continue;
}
final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
- if (indexMetaData == null) {
- // the index got deleted on the metadata, we will clean it later in the apply deleted method call
- continue;
- }
+ assert indexMetaData != null : "index has local allocation but no meta data. " + shardRouting.index();
final int shardId = shardRouting.id();
@@ -458,12 +502,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// for example: a shard that recovers from one node and now needs to recover to another node,
// or a replica allocated and then allocating a primary because the primary failed on another node
boolean shardHasBeenRemoved = false;
- if (currentRoutingEntry.isSameAllocation(shardRouting) == false) {
- logger.debug("[{}][{}] removing shard (different instance of it allocated on this node, current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
- // closing the shard will also cancel any ongoing recovery.
- indexService.removeShard(shardRouting.id(), "removing shard (different instance of it allocated on this node)");
- shardHasBeenRemoved = true;
- } else if (isPeerRecovery(shardRouting)) {
+ assert currentRoutingEntry.isSameAllocation(shardRouting) :
+ "local shard has a different allocation id but wasn't cleaning by applyDeletedShards. "
+ + "cluster state: " + shardRouting + " local: " + currentRoutingEntry;
+ if (isPeerRecovery(shardRouting)) {
final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(routingTable, nodes, shardRouting);
// check if there is an existing recovery going, and if so, and the source node is not the same, cancel the recovery to restart it
if (recoveryTargetService.cancelRecoveriesForShard(indexShard.shardId(), "recovery source node changed", status -> !status.sourceNode().equals(sourceNode))) {
@@ -477,7 +519,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (shardHasBeenRemoved == false) {
// shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there.
assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() :
- "shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
+ "shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
try {
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
} catch (Throwable e) {
@@ -487,44 +529,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
if (shardRouting.initializing()) {
- applyInitializingShard(event.state(), indexMetaData, shardRouting);
- }
- }
- }
-
- private void cleanFailedShards(final ClusterChangedEvent event) {
- RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
- if (routingNode == null) {
- failedShards.clear();
- return;
- }
- RoutingTable routingTable = event.state().routingTable();
- for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
- Map.Entry<ShardId, ShardRouting> entry = iterator.next();
- ShardId failedShardId = entry.getKey();
- ShardRouting failedShardRouting = entry.getValue();
- IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
- if (indexRoutingTable == null) {
- iterator.remove();
- continue;
- }
- IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
- if (shardRoutingTable == null) {
- iterator.remove();
- continue;
- }
- if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
- iterator.remove();
+ applyInitializingShard(event.state(), indexMetaData, indexService, shardRouting);
}
}
}
- private void applyInitializingShard(final ClusterState state, final IndexMetaData indexMetaData, final ShardRouting shardRouting) {
- final IndexService indexService = indicesService.indexService(shardRouting.index());
- if (indexService == null) {
- // got deleted on us, ignore
- return;
- }
+ private void applyInitializingShard(final ClusterState state, final IndexMetaData indexMetaData, IndexService indexService, final ShardRouting shardRouting) {
final RoutingTable routingTable = state.routingTable();
final DiscoveryNodes nodes = state.getNodes();
final int shardId = shardRouting.id();
@@ -537,7 +547,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// we managed to tell the master we started), mark us as started
if (logger.isTraceEnabled()) {
logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}",
- indexShard.shardId(), indexShard.state(), nodes.masterNode());
+ indexShard.shardId(), indexShard.state(), nodes.masterNode());
}
if (nodes.masterNode() != null) {
shardStateAction.shardStarted(shardRouting,
@@ -618,8 +628,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
assert indexShard.routingEntry().equals(shardRouting); // should have already been done before
// recover from filesystem store
final RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(),
- RecoveryState.Type.STORE,
- nodes.localNode(), nodes.localNode());
+ RecoveryState.Type.STORE,
+ nodes.localNode(), nodes.localNode());
indexShard.markAsRecovering("from store", recoveryState); // mark the shard as recovering on the cluster state thread
threadPool.generic().execute(() -> {
try {
@@ -634,7 +644,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
} else {
// recover from a restore
final RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(),
- RecoveryState.Type.SNAPSHOT, shardRouting.restoreSource(), nodes.localNode());
+ RecoveryState.Type.SNAPSHOT, shardRouting.restoreSource(), nodes.localNode());
indexShard.markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
threadPool.generic().execute(() -> {
final ShardId sId = indexShard.shardId();
diff --git a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java
index ddf3781d1a..16b3aa10a2 100644
--- a/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java
+++ b/core/src/main/java/org/elasticsearch/ingest/core/CompoundProcessor.java
@@ -28,15 +28,16 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.stream.Collectors;
/**
* A Processor that executes a list of other "processors". It executes a separate list of
* "onFailureProcessors" when any of the processors throw an {@link Exception}.
*/
public class CompoundProcessor implements Processor {
- static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
- static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
- static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";
+ public static final String ON_FAILURE_MESSAGE_FIELD = "on_failure_message";
+ public static final String ON_FAILURE_PROCESSOR_TYPE_FIELD = "on_failure_processor_type";
+ public static final String ON_FAILURE_PROCESSOR_TAG_FIELD = "on_failure_processor_tag";
private final List<Processor> processors;
private final List<Processor> onFailureProcessors;
@@ -84,7 +85,7 @@ public class CompoundProcessor implements Processor {
@Override
public String getTag() {
- return "compound-processor-" + Objects.hash(processors, onFailureProcessors);
+ return "CompoundProcessor-" + flattenProcessors().stream().map(Processor::getTag).collect(Collectors.joining("-"));
}
@Override
@@ -104,18 +105,27 @@ public class CompoundProcessor implements Processor {
}
void executeOnFailure(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) throws Exception {
- Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
try {
- ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
- ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);
- ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);
+ putFailureMetadata(ingestDocument, cause, failedProcessorType, failedProcessorTag);
for (Processor processor : onFailureProcessors) {
processor.execute(ingestDocument);
}
} finally {
- ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);
- ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
- ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
+ removeFailureMetadata(ingestDocument);
}
}
+
+ private void putFailureMetadata(IngestDocument ingestDocument, Exception cause, String failedProcessorType, String failedProcessorTag) {
+ Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
+ ingestMetadata.put(ON_FAILURE_MESSAGE_FIELD, cause.getMessage());
+ ingestMetadata.put(ON_FAILURE_PROCESSOR_TYPE_FIELD, failedProcessorType);
+ ingestMetadata.put(ON_FAILURE_PROCESSOR_TAG_FIELD, failedProcessorTag);
+ }
+
+ private void removeFailureMetadata(IngestDocument ingestDocument) {
+ Map<String, String> ingestMetadata = ingestDocument.getIngestMetadata();
+ ingestMetadata.remove(ON_FAILURE_MESSAGE_FIELD);
+ ingestMetadata.remove(ON_FAILURE_PROCESSOR_TYPE_FIELD);
+ ingestMetadata.remove(ON_FAILURE_PROCESSOR_TAG_FIELD);
+ }
}
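The refactor keeps executeOnFailure's original contract: the on_failure_* fields live in the ingest metadata only while the on-failure chain runs and are removed again in the finally block, and the constants are now public for callers that need them. A minimal sketch of that lifecycle, assuming CompoundProcessor.execute routes failures through executeOnFailure and that an IngestDocument named document already exists:

    // Hypothetical processors, for illustration only.
    Processor failing = new Processor() {
        public void execute(IngestDocument doc) throws Exception { throw new RuntimeException("boom"); }
        public String getType() { return "failing"; }
        public String getTag() { return "fail-1"; }
    };
    Processor capture = new Processor() {
        public void execute(IngestDocument doc) {
            // Visible only while the on-failure chain runs; removed right after.
            String msg = doc.getIngestMetadata().get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD);
            doc.setFieldValue("error", msg); // setFieldValue assumed from IngestDocument
        }
        public String getType() { return "capture"; }
        public String getTag() { return "capture-1"; }
    };
    new CompoundProcessor(Collections.singletonList(failing),
            Collections.singletonList(capture)).execute(document);
    // Back here, on_failure_message has been removed from the metadata again.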
diff --git a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java
index 821a44c0a9..aaae929e0a 100644
--- a/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java
+++ b/core/src/main/java/org/elasticsearch/ingest/core/Pipeline.java
@@ -69,6 +69,13 @@ public final class Pipeline {
}
/**
+ * Get the underlying {@link CompoundProcessor} containing the Pipeline's processors
+ */
+ public CompoundProcessor getCompoundProcessor() {
+ return compoundProcessor;
+ }
+
+ /**
* Unmodifiable list containing each processor that operates on the data.
*/
public List<Processor> getProcessors() {
diff --git a/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java b/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java
new file mode 100644
index 0000000000..af820318d8
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/ingest/processor/TrackingResultProcessor.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.processor;
+
+import org.elasticsearch.action.ingest.SimulateProcessorResult;
+import org.elasticsearch.ingest.core.CompoundProcessor;
+import org.elasticsearch.ingest.core.IngestDocument;
+import org.elasticsearch.ingest.core.Processor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Processor to be used within the Simulate API to keep track of the processors executed in a pipeline.
+ */
+public final class TrackingResultProcessor implements Processor {
+
+ private final Processor actualProcessor;
+ private final List<SimulateProcessorResult> processorResultList;
+
+ public TrackingResultProcessor(Processor actualProcessor, List<SimulateProcessorResult> processorResultList) {
+ this.processorResultList = processorResultList;
+ if (actualProcessor instanceof CompoundProcessor) {
+ CompoundProcessor trackedCompoundProcessor = decorate((CompoundProcessor) actualProcessor, processorResultList);
+ this.actualProcessor = trackedCompoundProcessor;
+ } else {
+ this.actualProcessor = actualProcessor;
+ }
+ }
+
+ @Override
+ public void execute(IngestDocument ingestDocument) throws Exception {
+ try {
+ actualProcessor.execute(ingestDocument);
+ processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
+ } catch (Exception e) {
+ processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e));
+ throw e;
+ }
+ }
+
+ @Override
+ public String getType() {
+ return actualProcessor.getType();
+ }
+
+ @Override
+ public String getTag() {
+ return actualProcessor.getTag();
+ }
+
+ public static CompoundProcessor decorate(CompoundProcessor compoundProcessor, List<SimulateProcessorResult> processorResultList) {
+ List<Processor> processors = new ArrayList<>(compoundProcessor.getProcessors().size());
+ for (Processor processor : compoundProcessor.getProcessors()) {
+ if (processor instanceof CompoundProcessor) {
+ processors.add(decorate((CompoundProcessor) processor, processorResultList));
+ } else {
+ processors.add(new TrackingResultProcessor(processor, processorResultList));
+ }
+ }
+ List<Processor> onFailureProcessors = new ArrayList<>(compoundProcessor.getProcessors().size());
+ for (Processor processor : compoundProcessor.getOnFailureProcessors()) {
+ if (processor instanceof CompoundProcessor) {
+ onFailureProcessors.add(decorate((CompoundProcessor) processor, processorResultList));
+ } else {
+ onFailureProcessors.add(new TrackingResultProcessor(processor, processorResultList));
+ }
+ }
+ return new CompoundProcessor(processors, onFailureProcessors);
+ }
+}
+
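Combined with the Pipeline#getCompoundProcessor accessor added above, the intended simulate-path wiring looks roughly like this (pipeline and ingestDocument are assumed to exist):

    List<SimulateProcessorResult> results = new ArrayList<>();
    // decorate() recursively wraps leaf processors and on-failure processors so
    // each execution appends a SimulateProcessorResult, preserving the nesting.
    CompoundProcessor tracked =
            TrackingResultProcessor.decorate(pipeline.getCompoundProcessor(), results);
    tracked.execute(ingestDocument);
    // results now holds one entry per executed processor, identified by getTag().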
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java
new file mode 100644
index 0000000000..06ba2a9be8
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/allocation/RestClusterAllocationExplainAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.cluster.allocation;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest;
+import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.BytesRestResponse;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.support.RestActions;
+import org.elasticsearch.rest.action.support.RestBuilderListener;
+
+import java.io.IOException;
+
+/**
+ * Class handling cluster allocation explanation at the REST level
+ */
+public class RestClusterAllocationExplainAction extends BaseRestHandler {
+
+ @Inject
+ public RestClusterAllocationExplainAction(Settings settings, RestController controller, Client client) {
+ super(settings, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_cluster/allocation/explain", this);
+ controller.registerHandler(RestRequest.Method.POST, "/_cluster/allocation/explain", this);
+ }
+
+ @Override
+ public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
+ ClusterAllocationExplainRequest req;
+ if (RestActions.hasBodyContent(request) == false) {
+ // Empty request signals "explain the first unassigned shard you find"
+ req = new ClusterAllocationExplainRequest();
+ } else {
+ BytesReference content = RestActions.getRestContent(request);
+ try (XContentParser parser = XContentFactory.xContent(content).createParser(content)) {
+ req = ClusterAllocationExplainRequest.parse(parser);
+ } catch (IOException e) {
+ logger.debug("failed to parse allocation explain request", e);
+ channel.sendResponse(new BytesRestResponse(ExceptionsHelper.status(e)));
+ return;
+ }
+ }
+
+ try {
+ req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false));
+ client.admin().cluster().allocationExplain(req, new RestBuilderListener<ClusterAllocationExplainResponse>(channel) {
+ @Override
+ public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws Exception {
+ response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ return new BytesRestResponse(RestStatus.OK, builder);
+ }
+ });
+ } catch (Exception e) {
+ logger.error("failed to explain allocation", e);
+ channel.sendResponse(new BytesRestResponse(ExceptionsHelper.status(e)));
+ }
+ }
+}
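The handler fronts the same transport action that the Java client exposes, so the ClusterAllocationExplainTests added further down can drive it without going through REST:

    // Explain a specific shard copy:
    ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
            .setIndex("test").setShard(0).setPrimary(false).get();
    // Or, mirroring an empty REST body, explain the first unassigned shard found:
    resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get();
    ClusterAllocationExplanation cae = resp.getExplanation();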
diff --git a/core/src/main/java/org/elasticsearch/search/SearchModule.java b/core/src/main/java/org/elasticsearch/search/SearchModule.java
index 96db4b1146..8756a31c44 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchModule.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchModule.java
@@ -21,21 +21,13 @@ package org.elasticsearch.search;
import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.geo.ShapesAvailability;
-import org.elasticsearch.common.geo.builders.CircleBuilder;
-import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
-import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
-import org.elasticsearch.common.geo.builders.LineStringBuilder;
-import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
-import org.elasticsearch.common.geo.builders.MultiPointBuilder;
-import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
-import org.elasticsearch.common.geo.builders.PointBuilder;
-import org.elasticsearch.common.geo.builders.PolygonBuilder;
-import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.percolator.PercolatorHighlightSubFetchPhase;
import org.elasticsearch.index.query.BoolQueryParser;
import org.elasticsearch.index.query.BoostingQueryParser;
import org.elasticsearch.index.query.CommonTermsQueryParser;
@@ -216,7 +208,6 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.innerhits.InnerHitsFetchSubPhase;
import org.elasticsearch.search.fetch.matchedqueries.MatchedQueriesFetchSubPhase;
import org.elasticsearch.search.fetch.parent.ParentFieldSubFetchPhase;
-import org.elasticsearch.index.percolator.PercolatorHighlightSubFetchPhase;
import org.elasticsearch.search.fetch.script.ScriptFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.source.FetchSourceSubPhase;
import org.elasticsearch.search.fetch.version.VersionFetchSubPhase;
@@ -286,6 +277,8 @@ public class SearchModule extends AbstractModule {
registerBuiltinFunctionScoreParsers();
registerBuiltinQueryParsers();
+ registerBuiltinRescorers();
+ registerBuiltinSorts();
}
public void registerHighlighter(String key, Class<? extends Highlighter> clazz) {
@@ -350,8 +343,6 @@ public class SearchModule extends AbstractModule {
configureSuggesters();
configureFetchSubPhase();
configureShapes();
- configureRescorers();
- configureSorts();
}
protected void configureFetchSubPhase() {
@@ -479,27 +470,19 @@ public class SearchModule extends AbstractModule {
private void configureShapes() {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
+ ShapeBuilders.register(namedWriteableRegistry);
}
}
- private void configureRescorers() {
- namedWriteableRegistry.registerPrototype(RescoreBuilder.class, QueryRescorerBuilder.PROTOTYPE);
+ private void registerBuiltinRescorers() {
+ namedWriteableRegistry.register(RescoreBuilder.class, QueryRescorerBuilder.NAME, QueryRescorerBuilder::new);
}
- private void configureSorts() {
- namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScriptSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, FieldSortBuilder.PROTOTYPE);
+ private void registerBuiltinSorts() {
+ namedWriteableRegistry.register(SortBuilder.class, GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::new);
+ namedWriteableRegistry.register(SortBuilder.class, ScoreSortBuilder.NAME, ScoreSortBuilder::new);
+ namedWriteableRegistry.register(SortBuilder.class, ScriptSortBuilder.NAME, ScriptSortBuilder::new);
+ namedWriteableRegistry.register(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new);
}
private void registerBuiltinFunctionScoreParsers() {
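This is the recurring pattern of the merge: prototype constants plus readFrom(in) give way to (category, name, reader) registration, with a stream-constructor reference acting as the deserializer. A sketch of both sides, assuming the Writeable.Reader functional interface that register() accepts:

    // Registration side: category class, transport name, constructor reference.
    namedWriteableRegistry.register(SortBuilder.class, FieldSortBuilder.NAME, FieldSortBuilder::new);

    // Deserialization side (sketch): the registry resolves a reader by name and
    // invokes the stream constructor instead of the old PROTOTYPE.readFrom(in).
    Writeable.Reader<FieldSortBuilder> reader = FieldSortBuilder::new;
    FieldSortBuilder restored = reader.read(in);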
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java
index 5969265f75..a11bfd113b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceAggregatorBuilder.java
@@ -211,7 +211,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder<
}
factory.keyed = in.readBoolean();
factory.distanceType = GeoDistance.readGeoDistanceFrom(in);
- factory.unit = DistanceUnit.readDistanceUnit(in);
+ factory.unit = DistanceUnit.readFromStream(in);
return factory;
}
@@ -225,7 +225,7 @@ public class GeoDistanceAggregatorBuilder extends ValuesSourceAggregatorBuilder<
}
out.writeBoolean(keyed);
distanceType.writeTo(out);
- DistanceUnit.writeDistanceUnit(out, unit);
+ unit.writeTo(out);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java
index b0d5a325e5..959bd51270 100644
--- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java
+++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java
@@ -85,10 +85,7 @@ public enum QueryRescoreMode implements Writeable<QueryRescoreMode> {
public abstract float combine(float primary, float secondary);
- static QueryRescoreMode PROTOTYPE = Total;
-
- @Override
- public QueryRescoreMode readFrom(StreamInput in) throws IOException {
+ public static QueryRescoreMode readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]");
diff --git a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java
index c65fca79a9..8556426557 100644
--- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescorerBuilder.java
@@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@@ -39,8 +38,6 @@ public class QueryRescorerBuilder extends RescoreBuilder<QueryRescorerBuilder> {
public static final String NAME = "query";
- public static final QueryRescorerBuilder PROTOTYPE = new QueryRescorerBuilder(new MatchAllQueryBuilder());
-
public static final float DEFAULT_RESCORE_QUERYWEIGHT = 1.0f;
public static final float DEFAULT_QUERYWEIGHT = 1.0f;
public static final QueryRescoreMode DEFAULT_SCORE_MODE = QueryRescoreMode.Total;
@@ -78,6 +75,25 @@ public class QueryRescorerBuilder extends RescoreBuilder<QueryRescorerBuilder> {
}
/**
+ * Read from a stream.
+ */
+ public QueryRescorerBuilder(StreamInput in) throws IOException {
+ super(in);
+ queryBuilder = in.readQuery();
+ scoreMode = QueryRescoreMode.readFromStream(in);
+ rescoreQueryWeight = in.readFloat();
+ queryWeight = in.readFloat();
+ }
+
+ @Override
+ public void doWriteTo(StreamOutput out) throws IOException {
+ out.writeQuery(queryBuilder);
+ scoreMode.writeTo(out);
+ out.writeFloat(rescoreQueryWeight);
+ out.writeFloat(queryWeight);
+ }
+
+ /**
* @return the query used for this rescore query
*/
public QueryBuilder<?> getRescoreQuery() {
@@ -140,9 +156,9 @@ public class QueryRescorerBuilder extends RescoreBuilder<QueryRescorerBuilder> {
builder.endObject();
}
- public QueryRescorerBuilder fromXContent(QueryParseContext parseContext) throws IOException {
- InnerBuilder innerBuilder = QUERY_RESCORE_PARSER.parse(parseContext.parser(), new InnerBuilder(), parseContext);
- return innerBuilder.build();
+ public static QueryRescorerBuilder fromXContent(QueryParseContext parseContext) throws IOException {
+ InnerBuilder innerBuilder = QUERY_RESCORE_PARSER.parse(parseContext.parser(), new InnerBuilder(), parseContext);
+ return innerBuilder.build();
}
@Override
@@ -182,23 +198,6 @@ public class QueryRescorerBuilder extends RescoreBuilder<QueryRescorerBuilder> {
}
@Override
- public QueryRescorerBuilder doReadFrom(StreamInput in) throws IOException {
- QueryRescorerBuilder rescorer = new QueryRescorerBuilder(in.readQuery());
- rescorer.setScoreMode(QueryRescoreMode.PROTOTYPE.readFrom(in));
- rescorer.setRescoreQueryWeight(in.readFloat());
- rescorer.setQueryWeight(in.readFloat());
- return rescorer;
- }
-
- @Override
- public void doWriteTo(StreamOutput out) throws IOException {
- out.writeQuery(queryBuilder);
- scoreMode.writeTo(out);
- out.writeFloat(rescoreQueryWeight);
- out.writeFloat(queryWeight);
- }
-
- @Override
public String getWriteableName() {
return NAME;
}
@@ -208,7 +207,7 @@ public class QueryRescorerBuilder extends RescoreBuilder<QueryRescorerBuilder> {
* for the constructor of {@link QueryRescorerBuilder}, but {@link ObjectParser} only
* allows filling properties of an already constructed value.
*/
- private class InnerBuilder {
+ private static class InnerBuilder {
private QueryBuilder<?> queryBuilder;
private float rescoreQueryWeight = DEFAULT_RESCORE_QUERYWEIGHT;
diff --git a/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java b/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java
index 8dad07a543..3288538086 100644
--- a/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/rescore/RescoreBuilder.java
@@ -46,6 +46,27 @@ public abstract class RescoreBuilder<RB extends RescoreBuilder<RB>> implements T
private static ParseField WINDOW_SIZE_FIELD = new ParseField("window_size");
+ /**
+ * Construct an empty RescoreBuilder.
+ */
+ public RescoreBuilder() {
+ }
+
+ /**
+ * Read from a stream.
+ */
+ protected RescoreBuilder(StreamInput in) throws IOException {
+ windowSize = in.readOptionalVInt();
+ }
+
+ @Override
+ public final void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalVInt(this.windowSize);
+ doWriteTo(out);
+ }
+
+ protected abstract void doWriteTo(StreamOutput out) throws IOException;
+
@SuppressWarnings("unchecked")
public RB windowSize(int windowSize) {
this.windowSize = windowSize;
@@ -74,7 +95,7 @@ public abstract class RescoreBuilder<RB extends RescoreBuilder<RB>> implements T
} else if (token == XContentParser.Token.START_OBJECT) {
// we only have QueryRescorer at this point
if (QueryRescorerBuilder.NAME.equals(fieldName)) {
- rescorer = QueryRescorerBuilder.PROTOTYPE.fromXContent(parseContext);
+ rescorer = QueryRescorerBuilder.fromXContent(parseContext);
} else {
throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support rescorer with name [" + fieldName + "]");
}
@@ -129,23 +150,6 @@ public abstract class RescoreBuilder<RB extends RescoreBuilder<RB>> implements T
}
@Override
- public RB readFrom(StreamInput in) throws IOException {
- RB builder = doReadFrom(in);
- builder.windowSize = in.readOptionalVInt();
- return builder;
- }
-
- protected abstract RB doReadFrom(StreamInput in) throws IOException;
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- doWriteTo(out);
- out.writeOptionalVInt(this.windowSize);
- }
-
- protected abstract void doWriteTo(StreamOutput out) throws IOException;
-
- @Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
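Note the ordering contract this introduces: the final writeTo writes windowSize before delegating to doWriteTo, and subclasses consume it first through super(in), so both sides stay symmetric (the old code wrote it last). A round-trip sketch, with someQuery and the StreamInput.wrap helper assumed:

    QueryRescorerBuilder original = new QueryRescorerBuilder(someQuery).windowSize(20);
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);                          // windowSize, then doWriteTo()
    StreamInput in = StreamInput.wrap(out.bytes()); // assumed helper
    QueryRescorerBuilder copy = new QueryRescorerBuilder(in); // super(in) reads windowSize first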
diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
index 414062c0cd..bbe6f12ff3 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
@@ -42,7 +42,6 @@ import java.util.Objects;
* A sort builder to sort based on a document field.
*/
public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
- public static final FieldSortBuilder PROTOTYPE = new FieldSortBuilder("_na_");
public static final String NAME = "field_sort";
public static final ParseField NESTED_PATH = new ParseField("nested_path");
public static final ParseField NESTED_FILTER = new ParseField("nested_filter");
@@ -96,6 +95,30 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
this.fieldName = fieldName;
}
+ /**
+ * Read from a stream.
+ */
+ public FieldSortBuilder(StreamInput in) throws IOException {
+ fieldName = in.readString();
+ nestedFilter = in.readOptionalQuery();
+ nestedPath = in.readOptionalString();
+ missing = in.readGenericValue();
+ order = in.readOptionalWriteable(SortOrder::readFromStream);
+ sortMode = in.readOptionalWriteable(SortMode::readFromStream);
+ unmappedType = in.readOptionalString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(fieldName);
+ out.writeOptionalQuery(nestedFilter);
+ out.writeOptionalString(nestedPath);
+ out.writeGenericValue(missing);
+ out.writeOptionalWriteable(order);
+ out.writeOptionalWriteable(sortMode);
+ out.writeOptionalString(unmappedType);
+ }
+
/** Returns the document field this sort should be based on. */
public String getFieldName() {
return this.fieldName;
@@ -291,55 +314,16 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
return NAME;
}
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(this.fieldName);
- if (this.nestedFilter != null) {
- out.writeBoolean(true);
- out.writeQuery(this.nestedFilter);
- } else {
- out.writeBoolean(false);
- }
- out.writeOptionalString(this.nestedPath);
- out.writeGenericValue(this.missing);
-
- if (this.order != null) {
- out.writeBoolean(true);
- this.order.writeTo(out);
- } else {
- out.writeBoolean(false);
- }
-
- out.writeBoolean(this.sortMode != null);
- if (this.sortMode != null) {
- this.sortMode.writeTo(out);
- }
- out.writeOptionalString(this.unmappedType);
- }
-
- @Override
- public FieldSortBuilder readFrom(StreamInput in) throws IOException {
- String fieldName = in.readString();
- FieldSortBuilder result = new FieldSortBuilder(fieldName);
- if (in.readBoolean()) {
- QueryBuilder<?> query = in.readQuery();
- result.setNestedFilter(query);
- }
- result.setNestedPath(in.readOptionalString());
- result.missing(in.readGenericValue());
-
- if (in.readBoolean()) {
- result.order(SortOrder.readOrderFrom(in));
- }
- if (in.readBoolean()) {
- result.sortMode(SortMode.PROTOTYPE.readFrom(in));
- }
- result.unmappedType(in.readOptionalString());
- return result;
- }
-
- @Override
- public FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
+ /**
+ * Creates a new {@link FieldSortBuilder} from the query held by the {@link QueryParseContext} in
+ * {@link org.elasticsearch.common.xcontent.XContent} format.
+ *
+ * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this
+ * method call
+ * @param fieldName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g.
+ * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument
+ */
+ public static FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
XContentParser parser = context.parser();
QueryBuilder<?> nestedFilter = null;
diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
index c6a63d5f08..1f5dccbdf4 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java
@@ -75,8 +75,6 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
public static final ParseField NESTED_FILTER_FIELD = new ParseField("nested_filter");
public static final ParseField REVERSE_FORBIDDEN = new ParseField("reverse");
- public static final GeoDistanceSortBuilder PROTOTYPE = new GeoDistanceSortBuilder("_na_", -1, -1);
-
private final String fieldName;
private final List<GeoPoint> points = new ArrayList<>();
@@ -150,6 +148,37 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
}
/**
+ * Read from a stream.
+ */
+ @SuppressWarnings("unchecked")
+ public GeoDistanceSortBuilder(StreamInput in) throws IOException {
+ fieldName = in.readString();
+ points.addAll((List<GeoPoint>) in.readGenericValue());
+ geoDistance = GeoDistance.readGeoDistanceFrom(in);
+ unit = DistanceUnit.readFromStream(in);
+ order = SortOrder.readFromStream(in);
+ sortMode = in.readOptionalWriteable(SortMode::readFromStream);
+ nestedFilter = in.readOptionalQuery();
+ nestedPath = in.readOptionalString();
+ coerce = in.readBoolean();
+ ignoreMalformed = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(fieldName);
+ out.writeGenericValue(points);
+ geoDistance.writeTo(out);
+ unit.writeTo(out);
+ order.writeTo(out);
+ out.writeOptionalWriteable(sortMode);
+ out.writeOptionalQuery(nestedFilter);
+ out.writeOptionalString(nestedPath);
+ out.writeBoolean(coerce);
+ out.writeBoolean(ignoreMalformed);
+ }
+
+ /**
* Returns the geo point like field the distance based sort operates on.
* */
public String fieldName() {
@@ -366,53 +395,16 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
this.unit, this.sortMode, this.order, this.nestedFilter, this.nestedPath, this.coerce, this.ignoreMalformed);
}
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(fieldName);
- out.writeGenericValue(points);
-
- geoDistance.writeTo(out);
- unit.writeTo(out);
- order.writeTo(out);
- out.writeBoolean(this.sortMode != null);
- if (this.sortMode != null) {
- sortMode.writeTo(out);
- }
- if (nestedFilter != null) {
- out.writeBoolean(true);
- out.writeQuery(nestedFilter);
- } else {
- out.writeBoolean(false);
- }
- out.writeOptionalString(nestedPath);
- out.writeBoolean(coerce);
- out.writeBoolean(ignoreMalformed);
- }
-
- @Override
- public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException {
- String fieldName = in.readString();
-
- ArrayList<GeoPoint> points = (ArrayList<GeoPoint>) in.readGenericValue();
- GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()]));
-
- result.geoDistance(GeoDistance.readGeoDistanceFrom(in));
- result.unit(DistanceUnit.readDistanceUnit(in));
- result.order(SortOrder.readOrderFrom(in));
- if (in.readBoolean()) {
- result.sortMode = SortMode.PROTOTYPE.readFrom(in);
- }
- if (in.readBoolean()) {
- result.setNestedFilter(in.readQuery());
- }
- result.setNestedPath(in.readOptionalString());
- result.coerce(in.readBoolean());
- result.ignoreMalformed(in.readBoolean());
- return result;
- }
-
- @Override
- public GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
+ /**
+ * Creates a new {@link GeoDistanceSortBuilder} from the query held by the {@link QueryParseContext} in
+ * {@link org.elasticsearch.common.xcontent.XContent} format.
+ *
+ * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this
+ * method call
+ * @param elementName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g.
+ * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument
+ */
+ public static GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
XContentParser parser = context.parser();
ParseFieldMatcher parseFieldMatcher = context.parseFieldMatcher();
String fieldName = null;
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
index c222634ca0..fa4472fadf 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScoreSortBuilder.java
@@ -39,17 +39,31 @@ import java.util.Objects;
public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> {
public static final String NAME = "_score";
- public static final ScoreSortBuilder PROTOTYPE = new ScoreSortBuilder();
+ public static final ParseField REVERSE_FIELD = new ParseField("reverse");
public static final ParseField ORDER_FIELD = new ParseField("order");
private static final ParseField REVERSE_FORBIDDEN = new ParseField("reverse");
private static final SortField SORT_SCORE = new SortField(null, SortField.Type.SCORE);
private static final SortField SORT_SCORE_REVERSE = new SortField(null, SortField.Type.SCORE, true);
+ /**
+ * Build a ScoreSortBuilder defaulting to descending sort order.
+ */
public ScoreSortBuilder() {
// order defaults to desc when sorting on the _score
order(SortOrder.DESC);
}
+ /**
+ * Read from a stream.
+ */
+ public ScoreSortBuilder(StreamInput in) throws IOException {
+ order(SortOrder.readFromStream(in));
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ order.writeTo(out);
+ }
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
@@ -61,8 +75,16 @@ public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> {
return builder;
}
- @Override
- public ScoreSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
+ /**
+ * Creates a new {@link ScoreSortBuilder} from the query held by the {@link QueryParseContext} in
+ * {@link org.elasticsearch.common.xcontent.XContent} format.
+ *
+ * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this
+ * method call
+ * @param fieldName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g.
+ * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument
+ */
+ public static ScoreSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
XContentParser parser = context.parser();
ParseFieldMatcher matcher = context.parseFieldMatcher();
@@ -112,17 +134,6 @@ public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> {
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- order.writeTo(out);
- }
-
- @Override
- public ScoreSortBuilder readFrom(StreamInput in) throws IOException {
- ScoreSortBuilder builder = new ScoreSortBuilder().order(SortOrder.readOrderFrom(in));
- return builder;
- }
-
- @Override
public String getWriteableName() {
return NAME;
}
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
index b79eb6e214..2751d49751 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java
@@ -67,7 +67,6 @@ import java.util.Objects;
public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
public static final String NAME = "_script";
- public static final ScriptSortBuilder PROTOTYPE = new ScriptSortBuilder(new Script("_na_"), ScriptSortType.STRING);
public static final ParseField TYPE_FIELD = new ParseField("type");
public static final ParseField SCRIPT_FIELD = new ParseField("script");
public static final ParseField SORTMODE_FIELD = new ParseField("mode");
@@ -111,6 +110,28 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
}
/**
+ * Read from a stream.
+ */
+ public ScriptSortBuilder(StreamInput in) throws IOException {
+ script = Script.readScript(in);
+ type = ScriptSortType.readFromStream(in);
+ order = SortOrder.readFromStream(in);
+ sortMode = in.readOptionalWriteable(SortMode::readFromStream);
+ nestedPath = in.readOptionalString();
+ nestedFilter = in.readOptionalQuery();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ script.writeTo(out);
+ type.writeTo(out);
+ order.writeTo(out);
+ out.writeOptionalWriteable(sortMode);
+ out.writeOptionalString(nestedPath);
+ out.writeOptionalQuery(nestedFilter);
+ }
+
+ /**
* Get the script used in this sort.
*/
public Script script() {
@@ -198,8 +219,16 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
return builder;
}
- @Override
- public ScriptSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
+ /**
+ * Creates a new {@link ScriptSortBuilder} from the query held by the {@link QueryParseContext} in
+ * {@link org.elasticsearch.common.xcontent.XContent} format.
+ *
+ * @param context the input parse context. The state on the parser contained in this context will be changed as a side effect of this
+ * method call
+ * @param elementName in some sort syntax variations the field name precedes the xContent object that specifies further parameters, e.g.
+ * in '{ "foo": { "order" : "asc"} }'. When parsing the inner object, the field name can be passed in via this argument
+ */
+ public static ScriptSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
XContentParser parser = context.parser();
ParseFieldMatcher parseField = context.parseFieldMatcher();
@@ -363,37 +392,6 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- script.writeTo(out);
- type.writeTo(out);
- order.writeTo(out);
- out.writeBoolean(sortMode != null);
- if (sortMode != null) {
- sortMode.writeTo(out);
- }
- out.writeOptionalString(nestedPath);
- boolean hasNestedFilter = nestedFilter != null;
- out.writeBoolean(hasNestedFilter);
- if (hasNestedFilter) {
- out.writeQuery(nestedFilter);
- }
- }
-
- @Override
- public ScriptSortBuilder readFrom(StreamInput in) throws IOException {
- ScriptSortBuilder builder = new ScriptSortBuilder(Script.readScript(in), ScriptSortType.PROTOTYPE.readFrom(in));
- builder.order(SortOrder.readOrderFrom(in));
- if (in.readBoolean()) {
- builder.sortMode(SortMode.PROTOTYPE.readFrom(in));
- }
- builder.nestedPath = in.readOptionalString();
- if (in.readBoolean()) {
- builder.nestedFilter = in.readQuery();
- }
- return builder;
- }
-
- @Override
public String getWriteableName() {
return NAME;
}
@@ -404,15 +402,15 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
/** script sort for a numeric value **/
NUMBER;
- static ScriptSortType PROTOTYPE = STRING;
-
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
- @Override
- public ScriptSortType readFrom(final StreamInput in) throws IOException {
+ /**
+ * Read from a stream.
+ */
+ static ScriptSortType readFromStream(final StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown ScriptSortType ordinal [" + ordinal + "]");
diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
index e007ac7736..ee6af01c93 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/SortBuilder.java
@@ -24,7 +24,6 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.join.BitSetProducer;
import org.elasticsearch.action.support.ToXContentToBytes;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.lucene.search.Queries;
@@ -55,34 +54,18 @@ public abstract class SortBuilder<T extends SortBuilder<?>> extends ToXContentTo
protected SortOrder order = SortOrder.ASC;
public static final ParseField ORDER_FIELD = new ParseField("order");
- private static final Map<String, SortBuilder<?>> PARSERS;
-
+ private static final Map<String, Parser<?>> PARSERS;
static {
- Map<String, SortBuilder<?>> parsers = new HashMap<>();
- parsers.put(ScriptSortBuilder.NAME, ScriptSortBuilder.PROTOTYPE);
- parsers.put(GeoDistanceSortBuilder.NAME, new GeoDistanceSortBuilder("_na_", -1, -1));
- parsers.put(GeoDistanceSortBuilder.ALTERNATIVE_NAME, new GeoDistanceSortBuilder("_na_", -1, -1));
- parsers.put(ScoreSortBuilder.NAME, ScoreSortBuilder.PROTOTYPE);
+ Map<String, Parser<?>> parsers = new HashMap<>();
+ parsers.put(ScriptSortBuilder.NAME, ScriptSortBuilder::fromXContent);
+ parsers.put(GeoDistanceSortBuilder.NAME, GeoDistanceSortBuilder::fromXContent);
+ parsers.put(GeoDistanceSortBuilder.ALTERNATIVE_NAME, GeoDistanceSortBuilder::fromXContent);
+ parsers.put(ScoreSortBuilder.NAME, ScoreSortBuilder::fromXContent);
+ // FieldSortBuilder gets involved if the user specifies a name that isn't one of these.
PARSERS = unmodifiableMap(parsers);
}
/**
- * Creates a new {@link SortBuilder} from the query held by the {@link QueryParseContext}
- * in {@link org.elasticsearch.common.xcontent.XContent} format
- *
- * @param parseContext
- * the input parse context. The state on the parser contained in
- * this context will be changed as a side effect of this method call
- * @param fieldName
- * in some sort syntax variations the field name precedes the xContent object that
- * specifies further parameters, e.g. in '{ "foo": { "order" : "asc"} }'. When
- * parsing the inner object, the field name can be passed in via this argument
- *
- * @return the new sort builder instance
- */
- protected abstract T fromXContent(QueryParseContext parseContext, @Nullable String fieldName) throws IOException;
-
- /**
* Create a @link {@link SortField} from this builder.
*/
protected abstract SortField build(QueryShardContext context) throws IOException;
@@ -153,7 +136,7 @@ public abstract class SortBuilder<T extends SortBuilder<?>> extends ToXContentTo
if (PARSERS.containsKey(fieldName)) {
sortFields.add(PARSERS.get(fieldName).fromXContent(context, fieldName));
} else {
- sortFields.add(FieldSortBuilder.PROTOTYPE.fromXContent(context, fieldName));
+ sortFields.add(FieldSortBuilder.fromXContent(context, fieldName));
}
}
}
@@ -218,4 +201,9 @@ public abstract class SortBuilder<T extends SortBuilder<?>> extends ToXContentTo
}
return nested;
}
+
+ @FunctionalInterface
+ private interface Parser<T extends SortBuilder<?>> {
+ T fromXContent(QueryParseContext context, String elementName) throws IOException;
+ }
}
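With the prototypes gone, sort parsing becomes a plain name-to-method-reference dispatch, and any element name missing from PARSERS is treated as a document field. Roughly, from inside SortBuilder's parsing loop (context is the surrounding QueryParseContext):

    // "_score", "_script" and "_geo_distance" resolve through PARSERS...
    SortBuilder<?> score = PARSERS.get(ScoreSortBuilder.NAME).fromXContent(context, "_score");
    // ...while an unrecognized name such as "price" falls through to a field sort:
    SortBuilder<?> field = FieldSortBuilder.fromXContent(context, "price");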
diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java
index 2f6ce9401d..c6b3e1b10b 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java
@@ -50,15 +50,12 @@ public enum SortMode implements Writeable<SortMode> {
/** Use the median of all values as sort value. Only applicable for number based array fields. **/
MEDIAN;
- static SortMode PROTOTYPE = MIN;
-
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeVInt(ordinal());
}
- @Override
- public SortMode readFrom(final StreamInput in) throws IOException {
+ public static SortMode readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown SortMode ordinal [" + ordinal + "]");
diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java
index 73e5ac5524..a84a456775 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java
@@ -50,11 +50,8 @@ public enum SortOrder implements Writeable<SortOrder> {
return "desc";
}
};
-
- private static final SortOrder PROTOTYPE = ASC;
- @Override
- public SortOrder readFrom(StreamInput in) throws IOException {
+ static SortOrder readFromStream(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]");
@@ -62,10 +59,6 @@ public enum SortOrder implements Writeable<SortOrder> {
return values()[ordinal];
}
- public static SortOrder readOrderFrom(StreamInput in) throws IOException {
- return PROTOTYPE.readFrom(in);
- }
-
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.ordinal());
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java
index 2d804bfc78..cb0443587a 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportService.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java
@@ -191,9 +191,18 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
if (holderToNotify != null) {
// callback that an exception happened, but on a different thread since we don't
// want handlers to worry about stack overflows
- threadPool.generic().execute(new Runnable() {
+ threadPool.generic().execute(new AbstractRunnable() {
@Override
- public void run() {
+ public void onRejection(Throwable t) {
+ // if we get rejected during node shutdown we don't wanna bubble it up
+ logger.debug("failed to notify response handler on rejection, action: {}", t, holderToNotify.action());
+ }
+ @Override
+ public void onFailure(Throwable t) {
+ logger.warn("failed to notify response handler on exception, action: {}", t, holderToNotify.action());
+ }
+ @Override
+ public void doRun() {
holderToNotify.handler().handleException(new TransportException("transport stopped, action: " + holderToNotify.action()));
}
});
@@ -333,11 +342,11 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
@Override
public void onRejection(Throwable t) {
// if we get rejected during node shutdown we don't wanna bubble it up
- logger.debug("failed to notify response handler on rejection", t);
+ logger.debug("failed to notify response handler on rejection, action: {}", t, holderToNotify.action());
}
@Override
public void onFailure(Throwable t) {
- logger.warn("failed to notify response handler on exception", t);
+ logger.warn("failed to notify response handler on exception, action: {}", t, holderToNotify.action());
}
@Override
protected void doRun() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java
new file mode 100644
index 0000000000..95415ecdbd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainTests.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+
+
+/**
+ * Tests for the cluster allocation explanation
+ */
+public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
+
+ public void testShardExplain() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings("index.number_of_shards", 1, "index.number_of_replicas", 1).get();
+ client().admin().cluster().health(Requests.clusterHealthRequest("test").waitForYellowStatus()).get();
+ ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
+ .setIndex("test").setShard(0).setPrimary(false).get();
+
+ ClusterAllocationExplanation cae = resp.getExplanation();
+ assertNotNull("should always have an explanation", cae);
+ assertEquals("test", cae.getShard().getIndexName());
+ assertEquals(0, cae.getShard().getId());
+ assertEquals(false, cae.isPrimary());
+ assertNull(cae.getAssignedNodeId());
+ assertNotNull(cae.getUnassignedInfo());
+ Decision d = cae.getNodeDecisions().values().iterator().next();
+ assertNotNull("should have a decision", d);
+ assertEquals(Decision.Type.NO, d.type());
+ assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated on the same node id"));
+ assertTrue(d instanceof Decision.Multi);
+ Decision.Multi md = (Decision.Multi) d;
+ Decision ssd = md.getDecisions().get(0);
+ assertEquals(Decision.Type.NO, ssd.type());
+ assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated on the same node id"));
+ Float weight = cae.getNodeWeights().values().iterator().next();
+ assertNotNull("should have a weight", weight);
+
+ resp = client().admin().cluster().prepareAllocationExplain().setIndex("test").setShard(0).setPrimary(true).get();
+
+ cae = resp.getExplanation();
+ assertNotNull("should always have an explanation", cae);
+ assertEquals("test", cae.getShard().getIndexName());
+ assertEquals(0, cae.getShard().getId());
+ assertEquals(true, cae.isPrimary());
+ assertNotNull("shard should have assigned node id", cae.getAssignedNodeId());
+ assertNull("assigned shard should not have unassigned info", cae.getUnassignedInfo());
+ d = cae.getNodeDecisions().values().iterator().next();
+ assertNotNull("should have a decision", d);
+ assertEquals(Decision.Type.NO, d.type());
+ assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated on the same node id"));
+ assertTrue(d instanceof Decision.Multi);
+ md = (Decision.Multi) d;
+ ssd = md.getDecisions().get(0);
+ assertEquals(Decision.Type.NO, ssd.type());
+ assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated on the same node id"));
+ weight = cae.getNodeWeights().values().iterator().next();
+ assertNotNull("should have a weight", weight);
+
+ resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get();
+ cae = resp.getExplanation();
+ assertNotNull("should always have an explanation", cae);
+ assertEquals("test", cae.getShard().getIndexName());
+ assertEquals(0, cae.getShard().getId());
+ assertEquals(false, cae.isPrimary());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
new file mode 100644
index 0000000000..060fb73fbf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+/**
+ * Tests for the cluster allocation explanation
+ */
+public final class ClusterAllocationExplanationTests extends ESTestCase {
+
+ public void testDecisionEquality() {
+ Decision.Multi d = new Decision.Multi();
+ Decision.Multi d2 = new Decision.Multi();
+ d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
+ d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
+ d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
+ d2.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
+ d2.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
+ d2.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
+ assertEquals(d, d2);
+ }
+
+ public void testExplanationSerialization() throws Exception {
+ ShardId shard = new ShardId("test", "uuid", 0);
+ Map<DiscoveryNode, Decision> nodeToDecisions = new HashMap<>();
+ Map<DiscoveryNode, Float> nodeToWeight = new HashMap<>();
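+ // build identical multi-decisions and random weights for a random set of nodes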
+ for (int i = randomIntBetween(2, 5); i > 0; i--) {
+ DiscoveryNode dn = new DiscoveryNode("node-" + i, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ Decision.Multi d = new Decision.Multi();
+ d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
+ d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
+ d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
+ nodeToDecisions.put(dn, d);
+ nodeToWeight.put(dn, randomFloat());
+ }
+
+ ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true, "assignedNode", null,
+ nodeToDecisions, nodeToWeight);
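+ // round-trip the explanation through its transport serialization and verify nothing is lost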
+ BytesStreamOutput out = new BytesStreamOutput();
+ cae.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ ClusterAllocationExplanation cae2 = new ClusterAllocationExplanation(in);
+ assertEquals(shard, cae2.getShard());
+ assertTrue(cae2.isPrimary());
+ assertTrue(cae2.isAssigned());
+ assertEquals("assignedNode", cae2.getAssignedNodeId());
+ assertNull(cae2.getUnassignedInfo());
+ for (Map.Entry<DiscoveryNode, Decision> entry : cae2.getNodeDecisions().entrySet()) {
+ assertEquals(nodeToDecisions.get(entry.getKey()), entry.getValue());
+ }
+ assertEquals(nodeToWeight, cae2.getNodeWeights());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
index 48d9f8fed4..3d996becba 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java
@@ -219,6 +219,10 @@ public abstract class TaskManagerTestCase extends ESTestCase {
clusterService.close();
transportService.close();
}
+
+ public String getNodeId() {
+ return discoveryNode.getId();
+ }
}
public static void connectNodes(TestNode... nodes) {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
index 4b478b52bd..972d9735ef 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java
@@ -38,6 +38,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -54,6 +55,7 @@ import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@@ -629,4 +631,76 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
NodesResponse responses = future.get();
assertEquals(0, responses.failureCount());
}
+
+ /**
+ * This test starts node actions that block on all nodes. While the node actions are blocked mid-execution,
+ * it runs a tasks action that targets those blocked node actions. The test verifies that the tasks action is
+ * only executed on nodes that are not listed in the node filter.
+ */
+ public void testTaskNodeFiltering() throws ExecutionException, InterruptedException, IOException {
+ setupTestNodes(Settings.EMPTY);
+ connectNodes(testNodes);
+ CountDownLatch checkLatch = new CountDownLatch(1);
+ // Start some test nodes action so we could have something to run tasks actions on
+ ActionFuture<NodesResponse> future = startBlockingTestNodesAction(checkLatch);
+
+ String[] allNodes = new String[testNodes.length];
+ for (int i = 0; i < testNodes.length; i++) {
+ allNodes[i] = testNodes[i].getNodeId();
+ }
+
+ int filterNodesSize = randomInt(allNodes.length);
+ Set<String> filterNodes = new HashSet<>(randomSubsetOf(filterNodesSize, allNodes));
+ logger.info("Filtering out nodes {} size: {}", filterNodes, filterNodesSize);
+
+ TestTasksAction[] tasksActions = new TestTasksAction[nodesCount];
+ for (int i = 0; i < testNodes.length; i++) {
+ final int node = i;
+ // Simulate a tasks action that runs on all nodes except the nodes listed in filterNodes,
+ // so we can verify that filtered nodes never execute the task operation.
+ tasksActions[i] = new TestTasksAction(Settings.EMPTY, "testTasksAction", clusterName, threadPool,
+ testNodes[i].clusterService, testNodes[i].transportService) {
+
+ @Override
+ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
+ String[] superNodes = super.filterNodeIds(nodes, nodesIds);
+ List<String> filteredNodes = new ArrayList<>();
+ for (String node : superNodes) {
+ if (filterNodes.contains(node) == false) {
+ filteredNodes.add(node);
+ }
+ }
+ return filteredNodes.toArray(new String[filteredNodes.size()]);
+ }
+
+ @Override
+ protected TestTaskResponse taskOperation(TestTasksRequest request, Task task) {
+ return new TestTaskResponse(testNodes[node].getNodeId());
+ }
+ };
+ }
+
+ // Run the tasks action against the node tasks that are currently running;
+ // it should succeed on all nodes except the ones we filtered out
+ TestTasksRequest testTasksRequest = new TestTasksRequest();
+ testTasksRequest.setActions("testAction[n]"); // pick all test actions
+ TestTasksResponse response = tasksActions[randomIntBetween(0, nodesCount - 1)].execute(testTasksRequest).get();
+
+ // Get successful responses from all nodes except nodes that we filtered out
+ assertEquals(testNodes.length - filterNodes.size(), response.tasks.size());
+ assertEquals(0, response.getTaskFailures().size()); // no task failed
+ assertEquals(0, response.getNodeFailures().size()); // no nodes failed
+
+ // Make sure that filtered nodes didn't send any responses
+ for (TestTaskResponse taskResponse : response.tasks) {
+ String nodeId = taskResponse.getStatus();
+ assertFalse("Found response from filtered node " + nodeId, filterNodes.contains(nodeId));
+ }
+
+ // Release all node tasks and wait for response
+ checkLatch.countDown();
+ NodesResponse responses = future.get();
+ assertEquals(0, responses.failureCount());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
index cf1cab2416..f66dfa81ea 100644
--- a/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java
@@ -31,10 +31,8 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;
-import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
-import java.util.List;
+import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@@ -46,7 +44,6 @@ public class SimulateExecutionServiceTests extends ESTestCase {
private ThreadPool threadPool;
private SimulateExecutionService executionService;
- private Pipeline pipeline;
private Processor processor;
private IngestDocument ingestDocument;
@@ -59,7 +56,6 @@ public class SimulateExecutionServiceTests extends ESTestCase {
);
executionService = new SimulateExecutionService(threadPool);
processor = new TestProcessor("id", "mock", ingestDocument -> {});
- pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor, processor));
ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
}
@@ -68,74 +64,6 @@ public class SimulateExecutionServiceTests extends ESTestCase {
threadPool.shutdown();
}
- public void testExecuteVerboseDocumentSimple() throws Exception {
- List<SimulateProcessorResult> processorResultList = new ArrayList<>();
- executionService.executeVerboseDocument(processor, ingestDocument, processorResultList);
- SimulateProcessorResult result = new SimulateProcessorResult("id", ingestDocument);
- assertThat(processorResultList.size(), equalTo(1));
- assertThat(processorResultList.get(0).getProcessorTag(), equalTo(result.getProcessorTag()));
- assertThat(processorResultList.get(0).getIngestDocument(), equalTo(result.getIngestDocument()));
- assertThat(processorResultList.get(0).getFailure(), nullValue());
- }
-
- public void testExecuteVerboseDocumentSimpleException() throws Exception {
- RuntimeException exception = new RuntimeException("mock_exception");
- TestProcessor processor = new TestProcessor("id", "mock", ingestDocument -> { throw exception; });
- List<SimulateProcessorResult> processorResultList = new ArrayList<>();
- try {
- executionService.executeVerboseDocument(processor, ingestDocument, processorResultList);
- fail("should throw exception");
- } catch (RuntimeException e) {
- assertThat(e.getMessage(), equalTo("mock_exception"));
- }
- SimulateProcessorResult result = new SimulateProcessorResult("id", exception);
- assertThat(processorResultList.size(), equalTo(1));
- assertThat(processorResultList.get(0).getProcessorTag(), equalTo(result.getProcessorTag()));
- assertThat(processorResultList.get(0).getFailure(), equalTo(result.getFailure()));
- }
-
- public void testExecuteVerboseDocumentCompoundSuccess() throws Exception {
- TestProcessor processor1 = new TestProcessor("p1", "mock", ingestDocument -> { });
- TestProcessor processor2 = new TestProcessor("p2", "mock", ingestDocument -> { });
-
- Processor compoundProcessor = new CompoundProcessor(processor1, processor2);
- List<SimulateProcessorResult> processorResultList = new ArrayList<>();
- executionService.executeVerboseDocument(compoundProcessor, ingestDocument, processorResultList);
- assertThat(processor1.getInvokedCounter(), equalTo(1));
- assertThat(processor2.getInvokedCounter(), equalTo(1));
- assertThat(processorResultList.size(), equalTo(2));
- assertThat(processorResultList.get(0).getProcessorTag(), equalTo("p1"));
- assertThat(processorResultList.get(0).getIngestDocument(), equalTo(ingestDocument));
- assertThat(processorResultList.get(0).getFailure(), nullValue());
- assertThat(processorResultList.get(1).getProcessorTag(), equalTo("p2"));
- assertThat(processorResultList.get(1).getIngestDocument(), equalTo(ingestDocument));
- assertThat(processorResultList.get(1).getFailure(), nullValue());
- }
-
- public void testExecuteVerboseDocumentCompoundOnFailure() throws Exception {
- TestProcessor processor1 = new TestProcessor("p1", "mock", ingestDocument -> { });
- TestProcessor processor2 = new TestProcessor("p2", "mock", ingestDocument -> { throw new RuntimeException("p2_exception"); });
- TestProcessor onFailureProcessor1 = new TestProcessor("fail_p1", "mock", ingestDocument -> { });
- TestProcessor onFailureProcessor2 = new TestProcessor("fail_p2", "mock", ingestDocument -> { throw new RuntimeException("fail_p2_exception"); });
- TestProcessor onFailureProcessor3 = new TestProcessor("fail_p3", "mock", ingestDocument -> { });
- CompoundProcessor onFailureCompoundProcessor = new CompoundProcessor(Collections.singletonList(onFailureProcessor2), Collections.singletonList(onFailureProcessor3));
-
- Processor compoundProcessor = new CompoundProcessor(Arrays.asList(processor1, processor2), Arrays.asList(onFailureProcessor1, onFailureCompoundProcessor));
- List<SimulateProcessorResult> processorResultList = new ArrayList<>();
- executionService.executeVerboseDocument(compoundProcessor, ingestDocument, processorResultList);
- assertThat(processor1.getInvokedCounter(), equalTo(1));
- assertThat(processor2.getInvokedCounter(), equalTo(1));
- assertThat(onFailureProcessor1.getInvokedCounter(), equalTo(1));
- assertThat(onFailureProcessor2.getInvokedCounter(), equalTo(1));
- assertThat(onFailureProcessor3.getInvokedCounter(), equalTo(1));
- assertThat(processorResultList.size(), equalTo(5));
- assertThat(processorResultList.get(0).getProcessorTag(), equalTo("p1"));
- assertThat(processorResultList.get(1).getProcessorTag(), equalTo("p2"));
- assertThat(processorResultList.get(2).getProcessorTag(), equalTo("fail_p1"));
- assertThat(processorResultList.get(3).getProcessorTag(), equalTo("fail_p2"));
- assertThat(processorResultList.get(4).getProcessorTag(), equalTo("fail_p3"));
- }
-
public void testExecuteVerboseItem() throws Exception {
TestProcessor processor = new TestProcessor("test-id", "mock", ingestDocument -> {});
Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor, processor));
@@ -170,16 +98,43 @@ public class SimulateExecutionServiceTests extends ESTestCase {
assertThat(simulateDocumentBaseResult.getFailure(), nullValue());
}
- public void testExecuteVerboseItemWithFailure() throws Exception {
+ public void testExecuteVerboseItemExceptionWithoutOnFailure() throws Exception {
+ TestProcessor processor1 = new TestProcessor("processor_0", "mock", ingestDocument -> {});
+ TestProcessor processor2 = new TestProcessor("processor_1", "mock", ingestDocument -> { throw new RuntimeException("processor failed"); });
+ TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {});
+ Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(processor1, processor2, processor3));
+ SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
+ assertThat(processor1.getInvokedCounter(), equalTo(1));
+ assertThat(processor2.getInvokedCounter(), equalTo(1));
+ assertThat(processor3.getInvokedCounter(), equalTo(0));
+ assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class));
+ SimulateDocumentVerboseResult simulateDocumentVerboseResult = (SimulateDocumentVerboseResult) actualItemResponse;
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(2));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("processor_0"));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), nullValue());
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), not(sameInstance(ingestDocument)));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), equalTo(ingestDocument));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument().getSourceAndMetadata(), not(sameInstance(ingestDocument.getSourceAndMetadata())));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getProcessorTag(), equalTo("processor_1"));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), nullValue());
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure(), instanceOf(RuntimeException.class));
+ RuntimeException runtimeException = (RuntimeException) simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure();
+ assertThat(runtimeException.getMessage(), equalTo("processor failed"));
+ }
+
+ public void testExecuteVerboseItemWithOnFailure() throws Exception {
TestProcessor processor1 = new TestProcessor("processor_0", "mock", ingestDocument -> { throw new RuntimeException("processor failed"); });
TestProcessor processor2 = new TestProcessor("processor_1", "mock", ingestDocument -> {});
- Pipeline pipeline = new Pipeline("_id", "_description", new CompoundProcessor(Collections.singletonList(processor1), Collections.singletonList(processor2)));
+ TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {});
+ Pipeline pipeline = new Pipeline("_id", "_description",
+ new CompoundProcessor(new CompoundProcessor(Collections.singletonList(processor1),
+ Collections.singletonList(processor2)), processor3));
SimulateDocumentResult actualItemResponse = executionService.executeDocument(pipeline, ingestDocument, true);
assertThat(processor1.getInvokedCounter(), equalTo(1));
assertThat(processor2.getInvokedCounter(), equalTo(1));
assertThat(actualItemResponse, instanceOf(SimulateDocumentVerboseResult.class));
SimulateDocumentVerboseResult simulateDocumentVerboseResult = (SimulateDocumentVerboseResult) actualItemResponse;
- assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(2));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().size(), equalTo(3));
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getProcessorTag(), equalTo("processor_0"));
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getIngestDocument(), nullValue());
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(0).getFailure(), instanceOf(RuntimeException.class));
@@ -187,8 +142,20 @@ public class SimulateExecutionServiceTests extends ESTestCase {
assertThat(runtimeException.getMessage(), equalTo("processor failed"));
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getProcessorTag(), equalTo("processor_1"));
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), not(sameInstance(ingestDocument)));
- assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), equalTo(ingestDocument));
+
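+ // the on-failure processor should see the document enriched with metadata describing processor_0's failure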
+ IngestDocument ingestDocumentWithOnFailureMetadata = new IngestDocument(ingestDocument);
+ Map<String, String> metadata = ingestDocumentWithOnFailureMetadata.getIngestMetadata();
+ metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD, "mock");
+ metadata.put(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD, "processor_0");
+ metadata.put(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD, "processor failed");
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getIngestDocument(), equalTo(ingestDocumentWithOnFailureMetadata));
+
assertThat(simulateDocumentVerboseResult.getProcessorResults().get(1).getFailure(), nullValue());
+
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getProcessorTag(), equalTo("processor_2"));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getIngestDocument(), not(sameInstance(ingestDocument)));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getIngestDocument(), equalTo(ingestDocument));
+ assertThat(simulateDocumentVerboseResult.getProcessorResults().get(2).getFailure(), nullValue());
}
public void testExecuteItemWithFailure() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
index b166f5f45c..5d17232735 100644
--- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java
@@ -34,7 +34,9 @@ import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -212,10 +214,12 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(new Index(index, "_na_"));
int shardIndex = -1;
+ int totalIndexShards = 0;
for (int i = 0; i < numberOfNodes; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
int numberOfShards = randomIntBetween(1, 10);
+ totalIndexShards += numberOfShards;
for (int j = 0; j < numberOfShards; j++) {
final ShardId shardId = new ShardId(index, "_na_", ++shardIndex);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED);
@@ -228,6 +232,12 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
discoBuilder.masterNodeId(newNode(numberOfNodes - 1).id());
ClusterState.Builder stateBuilder = ClusterState.builder(new ClusterName(TEST_CLUSTER));
stateBuilder.nodes(discoBuilder);
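+ // register index metadata whose shard count matches the generated routing table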
+ final IndexMetaData.Builder indexMetaData = IndexMetaData.builder(index)
+ .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+ .numberOfReplicas(0)
+ .numberOfShards(totalIndexShards);
+
+ stateBuilder.metaData(MetaData.builder().put(indexMetaData));
stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTable.build()).build());
ClusterState clusterState = stateBuilder.build();
setState(clusterService, clusterState);
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
index 4125f02b95..9170ff2e5a 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java
@@ -142,7 +142,7 @@ public class BroadcastReplicationTests extends ESTestCase {
public void testResultCombine() throws InterruptedException, ExecutionException, IOException {
final String index = "test";
- int numShards = randomInt(3);
+ int numShards = 1 + randomInt(3);
setState(clusterService, stateWithAssignedPrimariesAndOneReplica(index, numShards));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
Future<BroadcastResponse> response = (broadcastReplicationAction.execute(new DummyBroadcastRequest().indices(index)));
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
index 6b38d35c63..bfe2922906 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
@@ -45,6 +45,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.test.ESTestCase.randomFrom;
+import static org.elasticsearch.test.ESTestCase.randomInt;
import static org.elasticsearch.test.ESTestCase.randomIntBetween;
/**
@@ -84,10 +85,11 @@ public class ClusterStateCreationUtils {
}
discoBuilder.localNodeId(newNode(0).id());
discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
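+ // seed shard 0 with a random primary term so tests exercise arbitrary term values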
+ final int primaryTerm = randomInt(200);
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
- .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
+ .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm).build();
RoutingTable.Builder routing = new RoutingTable.Builder();
routing.addAsNew(indexMetaData);
@@ -111,7 +113,8 @@ public class ClusterStateCreationUtils {
} else {
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
}
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, unassignedInfo));
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true,
+ primaryState, unassignedInfo));
for (ShardRoutingState replicaState : replicaStates) {
String replicaNode = null;
@@ -152,7 +155,7 @@ public class ClusterStateCreationUtils {
discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
- .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
state.nodes(discoBuilder);
@@ -163,8 +166,10 @@ public class ClusterStateCreationUtils {
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId(index, "_na_", i);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, null));
- indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, null));
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true,
+ ShardRoutingState.STARTED, null));
+ indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false,
+ ShardRoutingState.STARTED, null));
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
}
state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
@@ -229,12 +234,13 @@ public class ClusterStateCreationUtils {
/**
* Creates a cluster state where local node and master node can be specified
+ *
* @param localNode node in allNodes that is the local node
* @param masterNode node in allNodes that is the master node. Can be null if no master exists
* @param allNodes all nodes in the cluster
* @return cluster state
*/
- public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
+ public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (DiscoveryNode node : allNodes) {
discoBuilder.put(node);
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
index 1fc94dcb53..446ad74e8b 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
@@ -630,11 +630,13 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardIterator shardIt = shardRoutingTable.shardsIt();
final ShardId shardId = shardIt.shardId();
final Request request = new Request(shardId);
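+ // tag the request with a random primary term; the replica logic must not change it (asserted below)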
+ final long primaryTerm = randomInt(200);
+ request.primaryTerm(primaryTerm);
final PlainActionFuture<Response> listener = new PlainActionFuture<>();
ReplicationTask task = maybeTask();
logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());
- TransportReplicationAction.IndexShardReference reference = getOrCreateIndexShardOperationsCounter();
+ TransportReplicationAction.IndexShardReference reference = getOrCreateIndexShardOperationsCounter(0);
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
indexShardRouting.set(primaryShard);
@@ -767,6 +769,9 @@ public class TransportReplicationActionTests extends ESTestCase {
}
// all replicas have responded so the counter should be decreased again
assertIndexShardCounter(1);
+
+ // assert that nothing in the replica logic changes the primary term of the operation
+ assertThat(request.primaryTerm(), equalTo(primaryTerm));
}
public void testCounterOnPrimary() throws Exception {
@@ -989,7 +994,7 @@ public class TransportReplicationActionTests extends ESTestCase {
/**
* Returns testIndexShardOperationsCounter or initializes it if it was already created in this test run.
*/
- private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter() {
+ private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter(long primaryTerm) {
count.incrementAndGet();
return new TransportReplicationAction.IndexShardReference() {
@Override
@@ -1010,6 +1015,11 @@ public class TransportReplicationActionTests extends ESTestCase {
}
@Override
+ public long opPrimaryTerm() {
+ return primaryTerm;
+ }
+
+ @Override
public void close() {
count.decrementAndGet();
}
@@ -1104,13 +1114,15 @@ public class TransportReplicationActionTests extends ESTestCase {
return false;
}
+
@Override
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
- return getOrCreateIndexShardOperationsCounter();
+ final IndexMetaData indexMetaData = clusterService.state().metaData().index(shardId.getIndex());
+ return getOrCreateIndexShardOperationsCounter(indexMetaData.primaryTerm(shardId.id()));
}
- protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
- return getOrCreateIndexShardOperationsCounter();
+ protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId, long opPrimaryTerm) {
+ return getOrCreateIndexShardOperationsCounter(opPrimaryTerm);
}
}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
index 4816c6038e..27f26e7c8c 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapCheckTests.java
@@ -32,6 +32,8 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.not;
public class BootstrapCheckTests extends ESTestCase {
@@ -80,9 +82,9 @@ public class BootstrapCheckTests extends ESTestCase {
public void testFileDescriptorLimitsThrowsOnInvalidLimit() {
final IllegalArgumentException e =
- expectThrows(
- IllegalArgumentException.class,
- () -> new BootstrapCheck.FileDescriptorCheck(-randomIntBetween(0, Integer.MAX_VALUE)));
+ expectThrows(
+ IllegalArgumentException.class,
+ () -> new BootstrapCheck.FileDescriptorCheck(-randomIntBetween(0, Integer.MAX_VALUE)));
assertThat(e.getMessage(), containsString("limit must be positive but was"));
}
@@ -121,8 +123,8 @@ public class BootstrapCheckTests extends ESTestCase {
fail("should have failed due to memory not being locked");
} catch (final RuntimeException e) {
assertThat(
- e.getMessage(),
- containsString("memory locking requested for elasticsearch process but memory is not locked"));
+ e.getMessage(),
+ containsString("memory locking requested for elasticsearch process but memory is not locked"));
}
} else {
// nothing should happen
@@ -197,4 +199,12 @@ public class BootstrapCheckTests extends ESTestCase {
assertTrue(BootstrapCheck.enforceLimits(settings));
}
+ public void testMinMasterNodes() {
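+ // the check fails (returns true) exactly when the minimum master nodes setting is not set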
+ boolean isSet = randomBoolean();
+ BootstrapCheck.Check check = new BootstrapCheck.MinMasterNodesCheck(isSet);
+ assertThat(check.check(), not(equalTo(isSet)));
+ List<BootstrapCheck.Check> defaultChecks = BootstrapCheck.checks(Settings.EMPTY);
+
+ expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, defaultChecks));
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
index 4a930bc9c2..016f70f51b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster;
import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
@@ -39,6 +40,8 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
+import java.util.HashMap;
+import java.util.Map;
public class ClusterModuleTests extends ModuleTestCase {
public static class FakeAllocationDecider extends AllocationDecider {
@@ -52,6 +55,11 @@ public class ClusterModuleTests extends ModuleTestCase {
public boolean allocate(RoutingAllocation allocation) {
return false;
}
+
+ @Override
+ public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
+ return new HashMap<>();
+ }
}
static class FakeIndexTemplateFilter implements IndexTemplateFilter {
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
index 5886158506..f7e8b18196 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
@@ -41,11 +41,14 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.put(IndexMetaData.builder("test1")
.settings(settings(Version.CURRENT))
.numberOfShards(1)
- .numberOfReplicas(2))
+ .numberOfReplicas(2)
+ .primaryTerm(0, 1))
.put(IndexMetaData.builder("test2")
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
.numberOfShards(2)
- .numberOfReplicas(3))
+ .numberOfReplicas(3)
+ .primaryTerm(0, 2)
+ .primaryTerm(1, 2))
.put(IndexMetaData.builder("test3")
.settings(settings(Version.CURRENT))
.numberOfShards(1)
@@ -112,15 +115,15 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
.putAlias(newAliasMetaDataBuilder("alias2"))
.putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
- .put(IndexTemplateMetaData.builder("foo")
- .template("bar")
- .order(1)
- .settings(settingsBuilder()
- .put("setting1", "value1")
- .put("setting2", "value2"))
- .putAlias(newAliasMetaDataBuilder("alias-bar1"))
- .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
- .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1)
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar1"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
.put(IndexMetaData.builder("test12")
.settings(settings(Version.CURRENT)
.put("setting1", "value1")
@@ -133,15 +136,15 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
.putAlias(newAliasMetaDataBuilder("alias2"))
.putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
- .put(IndexTemplateMetaData.builder("foo")
- .template("bar")
- .order(1)
- .settings(settingsBuilder()
- .put("setting1", "value1")
- .put("setting2", "value2"))
- .putAlias(newAliasMetaDataBuilder("alias-bar1"))
- .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
- .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1)
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar1"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
.build();
String metaDataSource = MetaData.Builder.toXContent(metaData);
@@ -150,6 +153,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
MetaData parsedMetaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.JSON).createParser(metaDataSource));
IndexMetaData indexMetaData = parsedMetaData.index("test1");
+ assertThat(indexMetaData.primaryTerm(0), equalTo(1L));
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
@@ -159,6 +163,8 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test2");
assertThat(indexMetaData.getNumberOfShards(), equalTo(2));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(3));
+ assertThat(indexMetaData.primaryTerm(0), equalTo(2L));
+ assertThat(indexMetaData.primaryTerm(1), equalTo(2L));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java
new file mode 100644
index 0000000000..d9b74621cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterStateHealth;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.node.DiscoveryNodes.Builder;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESAllocationTestCase;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class PrimaryTermsTests extends ESAllocationTestCase {
+
+ private static final String TEST_INDEX_1 = "test1";
+ private static final String TEST_INDEX_2 = "test2";
+ private RoutingTable testRoutingTable;
+ private int numberOfShards;
+ private int numberOfReplicas;
+ private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ private AllocationService allocationService;
+ private ClusterState clusterState;
+
+ private final Map<String, long[]> primaryTermsPerIndex = new HashMap<>();
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ this.allocationService = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", Integer.MAX_VALUE) // don't limit recoveries
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", Integer.MAX_VALUE)
+ .build());
+ this.numberOfShards = randomIntBetween(1, 5);
+ this.numberOfReplicas = randomIntBetween(1, 5);
+ logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
+ this.primaryTermsPerIndex.clear();
+ MetaData metaData = MetaData.builder()
+ .put(createIndexMetaData(TEST_INDEX_1))
+ .put(createIndexMetaData(TEST_INDEX_2))
+ .build();
+
+ this.testRoutingTable = new RoutingTable.Builder()
+ .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1))
+ .build())
+ .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2))
+ .build())
+ .build();
+
+ this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData)
+ .routingTable(testRoutingTable).build();
+ }
+
+ /**
+ * Puts the primary shard routings into the initializing state.
+ */
+ private void initPrimaries() {
+ logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
+ Builder discoBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < this.numberOfReplicas + 1; i++) {
+ discoBuilder = discoBuilder.put(newNode("node" + i));
+ }
+ this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
+ RoutingAllocation.Result rerouteResult = allocationService.reroute(clusterState, "reroute");
+ this.testRoutingTable = rerouteResult.routingTable();
+ assertThat(rerouteResult.changed(), is(true));
+ applyRerouteResult(rerouteResult);
+ primaryTermsPerIndex.keySet().forEach(this::incrementPrimaryTerm);
+ }
+
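+ /** increments the expected primary term for every shard of the given index */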
+ private void incrementPrimaryTerm(String index) {
+ final long[] primaryTerms = primaryTermsPerIndex.get(index);
+ for (int i = 0; i < primaryTerms.length; i++) {
+ primaryTerms[i]++;
+ }
+ }
+
+ private void incrementPrimaryTerm(String index, int shard) {
+ primaryTermsPerIndex.get(index)[shard]++;
+ }
+
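+ /** starts all initializing shards of the given index and returns true if the reroute changed the routing */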
+ private boolean startInitializingShards(String index) {
+ this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
+ final List<ShardRouting> startedShards = this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING);
+ logger.info("start primary shards for index [{}]: {} ", index, startedShards);
+ RoutingAllocation.Result rerouteResult = allocationService.applyStartedShards(this.clusterState, startedShards);
+ applyRerouteResult(rerouteResult);
+ return rerouteResult.changed();
+ }
+
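+ /** applies a reroute result to the test cluster state, bumping the state, routing table and metadata versions */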
+ private void applyRerouteResult(RoutingAllocation.Result rerouteResult) {
+ ClusterState previousClusterState = this.clusterState;
+ ClusterState newClusterState = ClusterState.builder(previousClusterState).routingResult(rerouteResult).build();
+ ClusterState.Builder builder = ClusterState.builder(newClusterState).incrementVersion();
+ if (previousClusterState.routingTable() != newClusterState.routingTable()) {
+ builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1)
+ .build());
+ }
+ if (previousClusterState.metaData() != newClusterState.metaData()) {
+ builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
+ }
+ this.clusterState = builder.build();
+ this.testRoutingTable = rerouteResult.routingTable();
+ final ClusterStateHealth clusterHealth = new ClusterStateHealth(clusterState);
+ logger.info("applied reroute. active shards: p [{}], t [{}], init shards: [{}], relocating: [{}]",
+ clusterHealth.getActivePrimaryShards(), clusterHealth.getActiveShards(),
+ clusterHealth.getInitializingShards(), clusterHealth.getRelocatingShards());
+ }
+
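+ /** fails a random subset of primaries of the given index, incrementing the expected term of each failed shard */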
+ private void failSomePrimaries(String index) {
+ this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
+ final IndexRoutingTable indexShardRoutingTable = testRoutingTable.index(index);
+ Set<Integer> shardIdsToFail = new HashSet<>();
+ for (int i = 1 + randomInt(numberOfShards - 1); i > 0; i--) {
+ shardIdsToFail.add(randomInt(numberOfShards - 1));
+ }
+ logger.info("failing primary shards {} for index [{}]", shardIdsToFail, index);
+ List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>();
+ for (int shard : shardIdsToFail) {
+ failedShards.add(new FailedRerouteAllocation.FailedShard(indexShardRoutingTable.shard(shard).primaryShard(), "test", null));
+ incrementPrimaryTerm(index, shard); // the primary failure should increment the primary term
+ }
+ RoutingAllocation.Result rerouteResult = allocationService.applyFailedShards(this.clusterState, failedShards);
+ applyRerouteResult(rerouteResult);
+ }
+
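+ /** adds a random number of extra nodes and reroutes, potentially triggering shard relocations */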
+ private void addNodes() {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ final int newNodes = randomInt(10);
+ logger.info("adding [{}] nodes", newNodes);
+ for (int i = 0; i < newNodes; i++) {
+ nodesBuilder.put(newNode("extra_" + i));
+ }
+ this.clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ RoutingAllocation.Result rerouteResult = allocationService.reroute(this.clusterState, "nodes added");
+ applyRerouteResult(rerouteResult);
+ }
+
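+ /** creates index metadata with random initial primary terms and records them as the expected terms */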
+ private IndexMetaData.Builder createIndexMetaData(String indexName) {
+ primaryTermsPerIndex.put(indexName, new long[numberOfShards]);
+ final IndexMetaData.Builder builder = new IndexMetaData.Builder(indexName)
+ .settings(DEFAULT_SETTINGS)
+ .numberOfReplicas(this.numberOfReplicas)
+ .numberOfShards(this.numberOfShards);
+ for (int i = 0; i < numberOfShards; i++) {
+ builder.primaryTerm(i, randomInt(200));
+ primaryTermsPerIndex.get(indexName)[i] = builder.primaryTerm(i);
+ }
+ return builder;
+ }
+
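+ /** asserts that the primary terms in the cluster state metadata match the expected terms for all indices */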
+ private void assertAllPrimaryTerm() {
+ primaryTermsPerIndex.keySet().forEach(this::assertPrimaryTerm);
+ }
+
+ private void assertPrimaryTerm(String index) {
+ final long[] terms = primaryTermsPerIndex.get(index);
+ final IndexMetaData indexMetaData = clusterState.metaData().index(index);
+ for (IndexShardRoutingTable shardRoutingTable : this.testRoutingTable.index(index)) {
+ final int shard = shardRoutingTable.shardId().id();
+ assertThat("primary term mismatch between indexMetaData of [" + index + "] and shard [" + shard + "]'s routing",
+ indexMetaData.primaryTerm(shard), equalTo(terms[shard]));
+ }
+ }
+
+ public void testPrimaryTermMetaDataSync() {
+ assertAllPrimaryTerm();
+
+ initPrimaries();
+ assertAllPrimaryTerm();
+
+ startInitializingShards(TEST_INDEX_1);
+ assertAllPrimaryTerm();
+
+ startInitializingShards(TEST_INDEX_2);
+ assertAllPrimaryTerm();
+
+ // now start all replicas too
+ startInitializingShards(TEST_INDEX_1);
+ startInitializingShards(TEST_INDEX_2);
+ assertAllPrimaryTerm();
+
+ // relocations shouldn't change primary terms
+ addNodes();
+ assertAllPrimaryTerm();
+ boolean changed = true;
+ while (changed) {
+ changed = startInitializingShards(TEST_INDEX_1);
+ assertAllPrimaryTerm();
+ changed |= startInitializingShards(TEST_INDEX_2);
+ assertAllPrimaryTerm();
+ }
+
+ // primary promotion
+ failSomePrimaries(TEST_INDEX_1);
+ assertAllPrimaryTerm();
+
+ // stabilize the cluster
+ changed = true;
+ while (changed) {
+ changed = startInitializingShards(TEST_INDEX_1);
+ assertAllPrimaryTerm();
+ changed |= startInitializingShards(TEST_INDEX_2);
+ assertAllPrimaryTerm();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
index 56a66b52d6..f1495bb5e7 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
@@ -44,6 +45,8 @@ import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.gateway.NoopGatewayAllocator;
import org.hamcrest.Matchers;
+import java.util.HashMap;
+import java.util.Map;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -313,6 +316,9 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()),
NoopGatewayAllocator.INSTANCE, new ShardsAllocator() {
+ @Override
+ public Map<DiscoveryNode, Float> weighShard(RoutingAllocation allocation, ShardRouting shard) {
+ return new HashMap<>();
+ }
/*
* // this allocator tries to rebuild this scenario where a rebalance is
* // triggered solely by the primary overload on node [1] where a shard
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
index be40351019..0bd8441312 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java
@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
-import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESAllocationTestCase;
import java.io.BufferedReader;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
index 7e59ab8a6b..b18ee32ff5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
@@ -58,29 +58,31 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
- clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
- RoutingTable prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute");
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ result = strategy.reroute(clusterState, "reroute");
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
logger.info("Start the primary shard (on node1)");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ result = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
logger.info("Start the backup shard (on node2)");
routingNodes = clusterState.getRoutingNodes();
- prevRoutingTable = routingTable;
- routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ result = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
logger.info("Adding third node and reroute and kill first node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build();
- prevRoutingTable = routingTable;
- routingTable = strategy.reroute(clusterState, "reroute").routingTable();
- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingTable prevRoutingTable = clusterState.routingTable();
+ result = strategy.reroute(clusterState, "reroute");
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
routingNodes = clusterState.getRoutingNodes();
+ routingTable = clusterState.routingTable();
assertThat(prevRoutingTable != routingTable, equalTo(true));
assertThat(routingTable.index("test").shards().size(), equalTo(1));
@@ -89,6 +91,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
// verify where the primary is
assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(clusterState.metaData().index("test").primaryTerm(0), equalTo(2L));
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
}
@@ -110,16 +113,18 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
rerouteResult = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2));
assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.metaData().index("test").primaryTerm(0), equalTo(1L));
+ assertThat(clusterState.metaData().index("test").primaryTerm(1), equalTo(1L));
// now, fail one node, while the replica is initializing, and it also holds a primary
logger.info("--> fail node with primary");
@@ -129,12 +134,13 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
.put(newNode(nodeIdRemaining))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
- clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
routingNodes = clusterState.getRoutingNodes();
assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(INITIALIZING).get(0).primary(), equalTo(true));
+ assertThat(clusterState.metaData().index("test").primaryTerm(0), equalTo(2L));
}
}
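
[Note] The mechanical-looking change above matters: strategy.reroute(...) returns a RoutingAllocation.Result that carries both a new routing table and updated metadata, including the bumped primary terms the new assertions check. Applying only result.routingTable(), as the old code did, silently drops the metadata half. A minimal sketch of why the tests switched to applying the whole result, using hypothetical simplified types rather than the real Elasticsearch classes:

    // Hypothetical, simplified stand-ins for RoutingTable / MetaData / Result.
    final class RoutingTableSketch { /* shard locations */ }
    final class MetaDataSketch { long[] primaryTerms; }
    final class RerouteResult {
        final RoutingTableSketch table;
        final MetaDataSketch metaData;
        RerouteResult(RoutingTableSketch table, MetaDataSketch metaData) {
            this.table = table;
            this.metaData = metaData;
        }
    }

    final class ClusterStateSketch {
        RoutingTableSketch table;
        MetaDataSketch metaData;

        // routingResult(result): both halves of the result stay in sync
        void applyRoutingResult(RerouteResult result) {
            this.table = result.table;
            this.metaData = result.metaData; // bumped primary terms survive
        }

        // routingTable(result.routingTable()): the metadata update is lost
        void applyRoutingTableOnly(RerouteResult result) {
            this.table = result.table;
        }
    }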
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java
new file mode 100644
index 0000000000..38c1575042
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShardStateIT extends ESIntegTestCase {
+
+ public void testPrimaryFailureIncreasesTerm() throws Exception {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
+ ensureGreen();
+ assertPrimaryTerms(1, 1);
+
+ logger.info("--> disabling allocation to capture shard failure");
+ disableAllocation("test");
+
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ final int shard = randomBoolean() ? 0 : 1;
+ final String nodeId = state.routingTable().index("test").shard(shard).primaryShard().currentNodeId();
+ final String node = state.nodes().get(nodeId).name();
+ logger.info("--> failing primary of [{}] on node [{}]", shard, node);
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
+ indicesService.indexService(resolveIndex("test")).getShard(shard).failShard("simulated test failure", null);
+
+ logger.info("--> waiting for a yellow index");
+ assertBusy(() -> assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW)));
+
+ final long term0 = shard == 0 ? 2 : 1;
+ final long term1 = shard == 1 ? 2 : 1;
+ assertPrimaryTerms(term0, term1);
+
+ logger.info("--> enabling allocation");
+ enableAllocation("test");
+ ensureGreen();
+ assertPrimaryTerms(term0, term1);
+ }
+
+ protected void assertPrimaryTerms(long term0, long term1) {
+ for (String node : internalCluster().getNodeNames()) {
+ logger.debug("--> asserting primary terms terms on [{}]", node);
+ ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState();
+ IndexMetaData metaData = state.metaData().index("test");
+ assertThat(metaData.primaryTerm(0), equalTo(term0));
+ assertThat(metaData.primaryTerm(1), equalTo(term1));
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
+ IndexService indexService = indicesService.indexService(metaData.getIndex());
+ if (indexService != null) {
+ for (IndexShard shard : indexService) {
+ assertThat("term mismatch for shard " + shard.shardId(),
+ shard.getPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
+ }
+ }
+ }
+ }
+}
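
[Note] The new ShardStateIT pins down the primary-term contract: terms start at 1, and a primary failure bumps only the failed shard's term by one, consistently across every node's cluster state and the shards themselves. A minimal sketch of that bookkeeping (a hypothetical class, not the real IndexMetaData):

    final class PrimaryTerms {
        private final long[] terms;

        PrimaryTerms(int numberOfShards) {
            terms = new long[numberOfShards];
            java.util.Arrays.fill(terms, 1L); // term 1: the initial primary allocation
        }

        long primaryTerm(int shardId) {
            return terms[shardId];
        }

        // invoked when the primary of shardId fails and a replica is promoted
        void onPrimaryFailover(int shardId) {
            terms[shardId]++;
        }
    }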
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
index 928756fec0..260a33780a 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -59,6 +59,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
@@ -794,7 +795,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
fail("should not have been able to reroute the shard");
} catch (IllegalArgumentException e) {
assertThat("can't allocated because there isn't enough room: " + e.getMessage(),
- e.getMessage().contains("more than allowed [70.0%] used disk on node, free: [26.0%]"), equalTo(true));
+ e.getMessage(),
+ containsString("the node is above the low watermark and has more than allowed [70.0%] used disk, free: [26.0%]"));
}
}
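
[Note] Switching from a boolean contains() check to the containsString matcher is not just cosmetic: on failure the matcher prints the actual message next to the expected substring, while the boolean form only reports that true was expected. A small sketch, assuming Hamcrest on the classpath:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.containsString;

    final class MatcherDemo {
        // On failure the matcher form prints:
        //   Expected: a string containing "above the low watermark"
        //        but: was "<the actual message>"
        // whereas assertTrue(msg.contains(...)) reports only "expected true".
        static void assertMentionsWatermark(String actualMessage) {
            assertThat(actualMessage, containsString("above the low watermark"));
        }
    }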
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
index 9311db44da..63f6ecd0e6 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java
@@ -50,15 +50,7 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte
public static void init() {
if (namedWriteableRegistry == null) {
namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
+ ShapeBuilders.register(namedWriteableRegistry);
}
}
@@ -146,8 +138,7 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte
try (BytesStreamOutput output = new BytesStreamOutput()) {
original.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
- ShapeBuilder prototype = (ShapeBuilder) namedWriteableRegistry.getPrototype(ShapeBuilder.class, original.getWriteableName());
- return prototype.readFrom(in);
+ return namedWriteableRegistry.getReader(ShapeBuilder.class, original.getWriteableName()).read(in);
}
}
}
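
[Note] The copy helper above now resolves a reader function by writeable name instead of asking a prototype instance to read a new object. Stripped of the Elasticsearch specifics, the round-trip test pattern looks roughly like this (hypothetical Writer/Reader interfaces over plain java.io streams):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class RoundTrip {
        interface Writer<T> { void write(DataOutput out, T value) throws IOException; }
        interface Reader<T> { T read(DataInput in) throws IOException; }

        // Serialize original, then read it back through the registered reader;
        // the test asserts the copy equals the original but is a new instance.
        static <T> T copy(T original, Writer<T> writer, Reader<T> reader) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writer.write(new DataOutputStream(bytes), original);
            return reader.read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        }
    }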
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java
index 881db868ef..c2730f91df 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java
@@ -19,30 +19,21 @@
package org.elasticsearch.common.geo.builders;
-import org.locationtech.spatial4j.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;
+
import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.locationtech.spatial4j.shape.Rectangle;
import java.io.IOException;
-import static org.hamcrest.Matchers.equalTo;
-
public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase<EnvelopeBuilder> {
public void testInvalidConstructorArgs() {
- try {
- new EnvelopeBuilder(null, new Coordinate(1.0, -1.0));
- fail("Exception expected");
- } catch (NullPointerException e) {
- assertThat("topLeft of envelope cannot be null", equalTo(e.getMessage()));
- }
-
- try {
- new EnvelopeBuilder(new Coordinate(1.0, -1.0), null);
- fail("Exception expected");
- } catch (NullPointerException e) {
- assertThat("bottomRight of envelope cannot be null", equalTo(e.getMessage()));
- }
+ NullPointerException e;
+ e = expectThrows(NullPointerException.class, () -> new EnvelopeBuilder(null, new Coordinate(1.0, -1.0)));
+ assertEquals("topLeft of envelope cannot be null", e.getMessage());
+ e = expectThrows(NullPointerException.class, () -> new EnvelopeBuilder(new Coordinate(1.0, -1.0), null));
+ assertEquals("bottomRight of envelope cannot be null", e.getMessage());
}
@Override
@@ -60,16 +51,21 @@ public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase<EnvelopeB
// move one corner to the middle of original
switch (randomIntBetween(0, 3)) {
case 0:
- mutation = new EnvelopeBuilder(new Coordinate(randomDoubleBetween(-180.0, original.bottomRight().x, true), original.topLeft().y), original.bottomRight());
+ mutation = new EnvelopeBuilder(
+ new Coordinate(randomDoubleBetween(-180.0, original.bottomRight().x, true), original.topLeft().y),
+ original.bottomRight());
break;
case 1:
- mutation = new EnvelopeBuilder(new Coordinate(original.topLeft().x, randomDoubleBetween(original.bottomRight().y, 90.0, true)), original.bottomRight());
+ mutation = new EnvelopeBuilder(new Coordinate(original.topLeft().x, randomDoubleBetween(original.bottomRight().y, 90.0, true)),
+ original.bottomRight());
break;
case 2:
- mutation = new EnvelopeBuilder(original.topLeft(), new Coordinate(randomDoubleBetween(original.topLeft().x, 180.0, true), original.bottomRight().y));
+ mutation = new EnvelopeBuilder(original.topLeft(),
+ new Coordinate(randomDoubleBetween(original.topLeft().x, 180.0, true), original.bottomRight().y));
break;
case 3:
- mutation = new EnvelopeBuilder(original.topLeft(), new Coordinate(original.bottomRight().x, randomDoubleBetween(-90.0, original.topLeft().y, true)));
+ mutation = new EnvelopeBuilder(original.topLeft(),
+ new Coordinate(original.bottomRight().x, randomDoubleBetween(-90.0, original.topLeft().y, true)));
break;
}
return mutation;
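
[Note] The try/fail/catch blocks these geo tests used to carry collapse into expectThrows, which both fails when nothing is thrown and returns the typed exception so the caller can assert on its message. A sketch of what such a helper boils down to (not the actual test-framework source):

    final class Asserts {
        interface ThrowingRunnable {
            void run() throws Throwable;
        }

        static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
            try {
                runnable.run();
            } catch (Throwable actual) {
                if (expectedType.isInstance(actual)) {
                    return expectedType.cast(actual); // hand it back for message assertions
                }
                throw new AssertionError("expected " + expectedType.getSimpleName()
                        + " but got " + actual.getClass().getSimpleName(), actual);
            }
            throw new AssertionError("expected " + expectedType.getSimpleName() + " but nothing was thrown");
        }
    }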
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java
index f6fcf8449d..e96c35287c 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java
@@ -27,31 +27,15 @@ import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
import java.io.IOException;
import java.util.List;
-import static org.hamcrest.Matchers.equalTo;
-
public class LineStringBuilderTests extends AbstractShapeBuilderTestCase<LineStringBuilder> {
public void testInvalidConstructorArgs() {
- try {
- new LineStringBuilder((List<Coordinate>) null);
- fail("Exception expected");
- } catch (IllegalArgumentException e) {
- assertThat("cannot create point collection with empty set of points", equalTo(e.getMessage()));
- }
-
- try {
- new LineStringBuilder(new CoordinatesBuilder());
- fail("Exception expected");
- } catch (IllegalArgumentException e) {
- assertThat("cannot create point collection with empty set of points", equalTo(e.getMessage()));
- }
-
- try {
- new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0));
- fail("Exception expected");
- } catch (IllegalArgumentException e) {
- assertThat("invalid number of points in LineString (found [1] - must be >= 2)", equalTo(e.getMessage()));
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new LineStringBuilder((List<Coordinate>) null));
+ assertEquals("cannot create point collection with empty set of points", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new LineStringBuilder(new CoordinatesBuilder()));
+ assertEquals("cannot create point collection with empty set of points", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new LineStringBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0)));
+ assertEquals("invalid number of points in LineString (found [1] - must be >= 2)", e.getMessage());
}
@Override
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java
index 3c618fd369..925d177c57 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java
@@ -68,9 +68,6 @@ public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase<Mu
}
static MultiLineStringBuilder createRandomShape() {
- if (true) {
- return new MultiLineStringBuilder();
- }
- return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING);
+ return new MultiLineStringBuilder();
}
}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java
index 006064578e..ec2eb50bd3 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java
@@ -20,29 +20,20 @@
package org.elasticsearch.common.geo.builders;
import com.vividsolutions.jts.geom.Coordinate;
+
import org.elasticsearch.test.geo.RandomShapeGenerator;
import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
import java.io.IOException;
-
-import static org.hamcrest.Matchers.equalTo;
+import java.util.List;
public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase<MultiPointBuilder> {
public void testInvalidBuilderException() {
- try {
- new MultiPointBuilder(null);
- fail("IllegalArgumentException expected");
- } catch (IllegalArgumentException e) {
- assertThat("cannot create point collection with empty set of points", equalTo(e.getMessage()));
- }
-
- try {
- new MultiPointBuilder(new CoordinatesBuilder().build());
- fail("IllegalArgumentException expected");
- } catch (IllegalArgumentException e) {
- assertThat("cannot create point collection with empty set of points", equalTo(e.getMessage()));
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MultiPointBuilder((List<Coordinate>) null));
+ assertEquals("cannot create point collection with empty set of points", e.getMessage());
+ e = expectThrows(IllegalArgumentException.class, () -> new MultiPointBuilder(new CoordinatesBuilder().build()));
+ assertEquals("cannot create point collection with empty set of points", e.getMessage());
// one point is minimum
new MultiPointBuilder(new CoordinatesBuilder().coordinate(0.0, 0.0).build());
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java
index 24e0bc8571..9a35690f94 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java
@@ -80,7 +80,8 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCase<PolygonBui
* This is done so we don't have to expose a setter for orientation in the actual class
*/
private static PolygonBuilder polyWithOposingOrientation(PolygonBuilder pb) {
- PolygonBuilder mutation = new PolygonBuilder(pb.shell(), pb.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT);
+ PolygonBuilder mutation = new PolygonBuilder(pb.shell(),
+ pb.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT);
for (LineStringBuilder hole : pb.holes()) {
mutation.hole(hole);
}
diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
index 80bad2e1ec..7a2828c0a1 100644
--- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java
@@ -29,8 +29,10 @@ import java.io.IOException;
import java.util.Objects;
import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.startsWith;
/**
* Tests for {@link BytesStreamOutput} paging behaviour.
@@ -301,7 +303,7 @@ public class BytesStreamsTests extends ESTestCase {
public void testNamedWriteable() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(BaseNamedWriteable.class, new TestNamedWriteable(null, null));
+ namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
out.writeNamedWriteable(namedWriteableIn);
byte[] bytes = out.bytes().toBytes();
@@ -314,32 +316,25 @@ public class BytesStreamsTests extends ESTestCase {
public void testNamedWriteableDuplicates() throws IOException {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(BaseNamedWriteable.class, new TestNamedWriteable(null, null));
- try {
- namedWriteableRegistry.registerPrototype(BaseNamedWriteable.class, new TestNamedWriteable(null, null));
- fail("registerPrototype should have failed");
- } catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("named writeable of type [" + TestNamedWriteable.class.getName() + "] with name [" + TestNamedWriteable.NAME + "] is already registered by type ["
- + TestNamedWriteable.class.getName() + "] within category [" + BaseNamedWriteable.class.getName() + "]"));
- }
+ namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+ () -> namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new));
+ assertThat(e.getMessage(), startsWith("named writeable [" + BaseNamedWriteable.class.getName() + "][" + TestNamedWriteable.NAME
+ + "] is already registered by ["));
}
public void testNamedWriteableUnknownCategory() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(new TestNamedWriteable("test1", "test2"));
StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes().toBytes()), new NamedWriteableRegistry());
- try {
- //no named writeable registered with given name, can write but cannot read it back
- in.readNamedWriteable(BaseNamedWriteable.class);
- fail("read should have failed");
- } catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("unknown named writeable category [" + BaseNamedWriteable.class.getName() + "]"));
- }
+ //no named writeable registered with given name, can write but cannot read it back
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
+ assertThat(e.getMessage(), equalTo("unknown named writeable category [" + BaseNamedWriteable.class.getName() + "]"));
}
public void testNamedWriteableUnknownNamedWriteable() throws IOException {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(BaseNamedWriteable.class, new TestNamedWriteable(null, null));
+ namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, TestNamedWriteable::new);
BytesStreamOutput out = new BytesStreamOutput();
out.writeNamedWriteable(new NamedWriteable() {
@Override
@@ -362,7 +357,7 @@ public class BytesStreamsTests extends ESTestCase {
in.readNamedWriteable(BaseNamedWriteable.class);
fail("read should have failed");
} catch(IllegalArgumentException e) {
- assertThat(e.getMessage(), equalTo("unknown named writeable with name [unknown] within category [" + BaseNamedWriteable.class.getName() + "]"));
+ assertThat(e.getMessage(), equalTo("unknown named writeable [" + BaseNamedWriteable.class.getName() + "][unknown]"));
}
}
@@ -379,6 +374,27 @@ public class BytesStreamsTests extends ESTestCase {
}
}
+ public void testNamedWriteableReaderReturnsNull() throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
+ namedWriteableRegistry.register(BaseNamedWriteable.class, TestNamedWriteable.NAME, (StreamInput in) -> null);
+ TestNamedWriteable namedWriteableIn = new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
+ out.writeNamedWriteable(namedWriteableIn);
+ byte[] bytes = out.bytes().toBytes();
+ StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry);
+ assertEquals(in.available(), bytes.length);
+ IOException e = expectThrows(IOException.class, () -> in.readNamedWriteable(BaseNamedWriteable.class));
+ assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
+ }
+
+ public void testOptionalWriteableReaderReturnsNull() throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.writeOptionalWriteable(new TestNamedWriteable(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10)));
+ StreamInput in = StreamInput.wrap(out.bytes().toBytes());
+ IOException e = expectThrows(IOException.class, () -> in.readOptionalWriteable((StreamInput ignored) -> null));
+ assertThat(e.getMessage(), endsWith("] returned null which is not allowed and probably means it screwed up the stream."));
+ }
+
private static abstract class BaseNamedWriteable<T> implements NamedWriteable<T> {
}
@@ -395,6 +411,11 @@ public class BytesStreamsTests extends ESTestCase {
this.field2 = field2;
}
+ public TestNamedWriteable(StreamInput in) throws IOException {
+ field1 = in.readString();
+ field2 = in.readString();
+ }
+
@Override
public String getWriteableName() {
return NAME;
@@ -407,11 +428,6 @@ public class BytesStreamsTests extends ESTestCase {
}
@Override
- public TestNamedWriteable readFrom(StreamInput in) throws IOException {
- return new TestNamedWriteable(in.readString(), in.readString());
- }
-
- @Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
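
[Note] These tests track the registry's move from prototype instances (registerPrototype/readFrom) to reader functions such as TestNamedWriteable::new. In miniature, ignoring categories and the real stream types, a reader-based registry behaves like this hypothetical sketch:

    import java.util.HashMap;
    import java.util.Map;

    final class ReaderRegistry<T> {
        interface Reader<V> { V read(byte[] in); }

        private final Map<String, Reader<? extends T>> readers = new HashMap<>();

        void register(String name, Reader<? extends T> reader) {
            if (readers.putIfAbsent(name, reader) != null) {
                throw new IllegalArgumentException("[" + name + "] is already registered");
            }
        }

        T read(String name, byte[] in) {
            Reader<? extends T> reader = readers.get(name);
            if (reader == null) {
                throw new IllegalArgumentException("unknown named writeable [" + name + "]");
            }
            T value = reader.read(in);
            if (value == null) {
                // mirrors the new tests: a reader returning null corrupts the stream position
                throw new IllegalStateException("reader for [" + name + "] returned null");
            }
            return value;
        }
    }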
diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
index bd794f96da..4d8f3e6e58 100644
--- a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
+++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java
@@ -191,23 +191,24 @@ public class NetworkModuleTests extends ModuleTestCase {
Settings settings = Settings.EMPTY;
NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false, registry);
- // Builtin prototype comes back
- assertNotNull(registry.getPrototype(Task.Status.class, ReplicationTask.Status.PROTOTYPE.getWriteableName()));
+ // Builtin reader comes back
+ assertNotNull(registry.getReader(Task.Status.class, ReplicationTask.Status.NAME));
- Task.Status dummy = new DummyTaskStatus();
- module.registerTaskStatus(dummy);
- assertThat(registry.getPrototype(Task.Status.class, "dummy"), sameInstance(dummy));
+ module.registerTaskStatus(DummyTaskStatus.NAME, DummyTaskStatus::new);
+ assertEquals("test", expectThrows(UnsupportedOperationException.class,
+ () -> registry.getReader(Task.Status.class, DummyTaskStatus.NAME).read(null)).getMessage());
}
private class DummyTaskStatus implements Task.Status {
- @Override
- public String getWriteableName() {
- return "dummy";
+ public static final String NAME = "dummy";
+
+ public DummyTaskStatus(StreamInput in) {
+ throw new UnsupportedOperationException("test");
}
@Override
- public Status readFrom(StreamInput in) throws IOException {
- throw new UnsupportedOperationException();
+ public String getWriteableName() {
+ return NAME;
}
@Override
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java
index e3f2bc1bb2..eb6cc56816 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java
@@ -201,8 +201,8 @@ public class SettingsTests extends ESTestCase {
assertThat(settings.getAsArray("value"), arrayContaining("2", "3"));
settings = settingsBuilder()
- .put(new YamlSettingsLoader().load("value: 1"))
- .put(new YamlSettingsLoader().load("value: [ 2, 3 ]"))
+ .put(new YamlSettingsLoader(false).load("value: 1"))
+ .put(new YamlSettingsLoader(false).load("value: [ 2, 3 ]"))
.build();
assertThat(settings.getAsArray("value"), arrayContaining("2", "3"));
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
index d7f10891f2..154ef8ee03 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
@@ -25,15 +25,14 @@ import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.test.ESTestCase;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-/**
- *
- */
public class JsonSettingsLoaderTests extends ESTestCase {
+
public void testSimpleJsonSettings() throws Exception {
- String json = "/org/elasticsearch/common/settings/loader/test-settings.json";
- Settings settings = settingsBuilder()
+ final String json = "/org/elasticsearch/common/settings/loader/test-settings.json";
+ final Settings settings = settingsBuilder()
.loadFromStream(json, getClass().getResourceAsStream(json))
.build();
@@ -50,15 +49,23 @@ public class JsonSettingsLoaderTests extends ESTestCase {
}
public void testDuplicateKeysThrowsException() {
- String json = "{\"foo\":\"bar\",\"foo\":\"baz\"}";
- try {
- settingsBuilder()
- .loadFromSource(json)
- .build();
- fail("expected exception");
- } catch (SettingsException e) {
- assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
- assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [1], column number [20], previous value [bar], current value [baz]"));
- }
+ final String json = "{\"foo\":\"bar\",\"foo\":\"baz\"}";
+ final SettingsException e = expectThrows(SettingsException.class, () -> settingsBuilder().loadFromSource(json).build());
+ assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
+ assertThat(
+ e.toString(),
+ containsString("duplicate settings key [foo] " +
+ "found at line number [1], " +
+ "column number [20], " +
+ "previous value [bar], " +
+ "current value [baz]"));
}
+
+ public void testNullValuedSettingThrowsException() {
+ final String json = "{\"foo\":null}";
+ final ElasticsearchParseException e =
+ expectThrows(ElasticsearchParseException.class, () -> new JsonSettingsLoader(false).load(json));
+ assertThat(e.toString(), containsString("null-valued setting found for key [foo] found at line number [1], column number [8]"));
+ }
+
}
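
[Note] The new testNullValuedSettingThrowsException, together with the YamlSettingsLoader(false) calls earlier in this diff, pins down a loader flag that rejects null values; duplicate keys are rejected unconditionally. The behaviour amounts to a strict map, sketched here hypothetically rather than as the loader's actual implementation:

    import java.util.HashMap;

    final class StrictSettingsMap extends HashMap<String, String> {
        private final boolean allowNullValues;

        StrictSettingsMap(boolean allowNullValues) {
            this.allowNullValues = allowNullValues;
        }

        @Override
        public String put(String key, String value) {
            if (value == null && allowNullValues == false) {
                throw new IllegalArgumentException("null-valued setting found for key [" + key + "]");
            }
            String previous = super.put(key, value);
            if (previous != null) {
                throw new IllegalArgumentException("duplicate settings key [" + key
                        + "], previous value [" + previous + "], current value [" + value + "]");
            }
            return previous;
        }
    }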
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java
index 7a1897fbaf..c13ae7cc68 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/PropertiesSettingsLoaderTests.java
@@ -21,27 +21,37 @@ package org.elasticsearch.common.settings.loader;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
import java.io.IOException;
import java.nio.charset.Charset;
public class PropertiesSettingsLoaderTests extends ESTestCase {
+
+ private PropertiesSettingsLoader loader;
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ loader = new PropertiesSettingsLoader();
+ }
+
public void testDuplicateKeyFromStringThrowsException() throws IOException {
- PropertiesSettingsLoader loader = new PropertiesSettingsLoader();
- try {
- loader.load("foo=bar\nfoo=baz");
- fail("expected exception");
- } catch (ElasticsearchParseException e) {
- assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
- }
+ final ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> loader.load("foo=bar\nfoo=baz"));
+ assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
}
public void testDuplicateKeysFromBytesThrowsException() throws IOException {
- PropertiesSettingsLoader loader = new PropertiesSettingsLoader();
- try {
- loader.load("foo=bar\nfoo=baz".getBytes(Charset.defaultCharset()));
- } catch (ElasticsearchParseException e) {
- assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
- }
+ final ElasticsearchParseException e = expectThrows(
+ ElasticsearchParseException.class,
+ () -> loader.load("foo=bar\nfoo=baz".getBytes(Charset.defaultCharset()))
+ );
+ assertEquals(e.getMessage(), "duplicate settings key [foo] found, previous value [bar], current value [baz]");
}
+
+ public void testThatNoDuplicatesPropertiesDoesNotAcceptNullValues() {
+ final PropertiesSettingsLoader.NoDuplicatesProperties properties = loader.new NoDuplicatesProperties();
+ expectThrows(NullPointerException.class, () -> properties.put("key", null));
+ }
+
}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
index 48703044ec..2e2a187da0 100644
--- a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
@@ -28,13 +28,11 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-/**
- *
- */
public class YamlSettingsLoaderTests extends ESTestCase {
+
public void testSimpleYamlSettings() throws Exception {
- String yaml = "/org/elasticsearch/common/settings/loader/test-settings.yml";
- Settings settings = settingsBuilder()
+ final String yaml = "/org/elasticsearch/common/settings/loader/test-settings.yml";
+ final Settings settings = settingsBuilder()
.loadFromStream(yaml, getClass().getResourceAsStream(yaml))
.build();
@@ -51,39 +49,41 @@ public class YamlSettingsLoaderTests extends ESTestCase {
}
public void testIndentation() {
- String yaml = "/org/elasticsearch/common/settings/loader/indentation-settings.yml";
- try {
- settingsBuilder()
- .loadFromStream(yaml, getClass().getResourceAsStream(yaml))
- .build();
- fail("Expected SettingsException");
- } catch(SettingsException e ) {
- assertThat(e.getMessage(), containsString("Failed to load settings"));
- }
+ final String yaml = "/org/elasticsearch/common/settings/loader/indentation-settings.yml";
+ final SettingsException e =
+ expectThrows(
+ SettingsException.class,
+ () -> settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)).build());
+ assertThat(e.getMessage(), containsString("Failed to load settings"));
}
public void testIndentationWithExplicitDocumentStart() {
- String yaml = "/org/elasticsearch/common/settings/loader/indentation-with-explicit-document-start-settings.yml";
- try {
- settingsBuilder()
- .loadFromStream(yaml, getClass().getResourceAsStream(yaml))
- .build();
- fail("Expected SettingsException");
- } catch (SettingsException e) {
- assertThat(e.getMessage(), containsString("Failed to load settings"));
- }
+ final String yaml = "/org/elasticsearch/common/settings/loader/indentation-with-explicit-document-start-settings.yml";
+ final SettingsException e =
+ expectThrows(
+ SettingsException.class,
+ () -> settingsBuilder().loadFromStream(yaml, getClass().getResourceAsStream(yaml)).build());
+ assertThat(e.getMessage(), containsString("Failed to load settings"));
}
public void testDuplicateKeysThrowsException() {
- String yaml = "foo: bar\nfoo: baz";
- try {
- settingsBuilder()
- .loadFromSource(yaml)
- .build();
- fail("expected exception");
- } catch (SettingsException e) {
- assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
- assertTrue(e.toString().contains("duplicate settings key [foo] found at line number [2], column number [6], previous value [bar], current value [baz]"));
- }
+ final String yaml = "foo: bar\nfoo: baz";
+ final SettingsException e = expectThrows(SettingsException.class, () -> settingsBuilder().loadFromSource(yaml).build());
+ assertEquals(e.getCause().getClass(), ElasticsearchParseException.class);
+ assertThat(
+ e.toString(),
+ containsString("duplicate settings key [foo] " +
+ "found at line number [2], " +
+ "column number [6], " +
+ "previous value [bar], " +
+ "current value [baz]"));
+ }
+
+ public void testNullValuedSettingThrowsException() {
+ final String yaml = "foo:";
+ final ElasticsearchParseException e =
+ expectThrows(ElasticsearchParseException.class, () -> new YamlSettingsLoader(false).load(yaml));
+ assertThat(e.toString(), containsString("null-valued setting found for key [foo] found at line number [1], column number [5]"));
}
+
}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
index 5d7bbb3ca1..f9a4d3f22a 100644
--- a/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -83,7 +83,7 @@ public class DistanceUnitTests extends ESTestCase {
try (BytesStreamOutput out = new BytesStreamOutput()) {
unit.writeTo(out);
try (StreamInput in = StreamInput.wrap(out.bytes())) {
- assertThat("Roundtrip serialisation failed.", DistanceUnit.readDistanceUnit(in), equalTo(unit));
+ assertThat("Roundtrip serialisation failed.", DistanceUnit.readFromStream(in), equalTo(unit));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
index e1d6beecaa..468d01aaec 100644
--- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java
@@ -25,10 +25,19 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.test.ESTestCase;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
/**
*/
@@ -89,4 +98,34 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
}
assertFalse("should not ignore, because current state doesn't have a master", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
}
+
+ public void testFilterNonMasterPingResponse() {
+ ArrayList<ZenPing.PingResponse> responses = new ArrayList<>();
+ ArrayList<DiscoveryNode> masterNodes = new ArrayList<>();
+ ArrayList<DiscoveryNode> allNodes = new ArrayList<>();
+ for (int i = randomIntBetween(10, 20); i >= 0; i--) {
+ Map<String, String> attrs = new HashMap<>();
+ for (String attr : randomSubsetOf(
+ Arrays.asList(DiscoveryNode.INGEST_ATTR, DiscoveryNode.DATA_ATTR, DiscoveryNode.MASTER_ATTR))) {
+ attrs.put(attr, randomBoolean() + "");
+ }
+
+ DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, DummyTransportAddress.INSTANCE, attrs, Version.CURRENT);
+ responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean()));
+ allNodes.add(node);
+ if (node.isMasterNode()) {
+ masterNodes.add(node);
+ }
+ }
+
+ boolean ignore = randomBoolean();
+ List<ZenPing.PingResponse> filtered = ZenDiscovery.filterPingResponses(
+ responses.toArray(new ZenPing.PingResponse[responses.size()]), ignore, logger);
+ final List<DiscoveryNode> filteredNodes = filtered.stream().map(ZenPing.PingResponse::node).collect(Collectors.toList());
+ if (ignore) {
+ assertThat(filteredNodes, equalTo(masterNodes));
+ } else {
+ assertThat(filteredNodes, equalTo(allNodes));
+ }
+ }
}
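
[Note] testFilterNonMasterPingResponse checks that ZenDiscovery.filterPingResponses keeps only pings from master-eligible nodes when the ignore flag is set, and passes everything through otherwise. Stripped to its core, with hypothetical types:

    import java.util.ArrayList;
    import java.util.List;

    final class PingFilter {
        interface PingResponse {
            boolean fromMasterEligibleNode();
        }

        static List<PingResponse> filter(List<PingResponse> responses, boolean ignoreNonMasters) {
            if (ignoreNonMasters == false) {
                return responses; // keep every response
            }
            List<PingResponse> filtered = new ArrayList<>();
            for (PingResponse response : responses) {
                if (response.fromMasterEligibleNode()) {
                    filtered.add(response);
                }
            }
            return filtered;
        }
    }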
diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
index 4da9c2df17..dac284ee59 100644
--- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
+++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java
@@ -19,11 +19,13 @@
package org.elasticsearch.gateway;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.settings.Settings;
@@ -37,11 +39,13 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
-import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSDirectoryService;
import org.elasticsearch.test.store.MockFSIndexStore;
import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.IntStream;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@@ -88,10 +92,13 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
ensureYellow("test"); // wait for primary allocations here otherwise if we have a lot of shards we might have a
// shard that is still in post recovery when we restart and the ensureYellow() below will timeout
+
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
client().admin().indices().prepareRefresh().execute().actionGet();
assertHitCount(client().prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
@@ -100,11 +107,37 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
client().admin().indices().prepareRefresh().execute().actionGet();
assertHitCount(client().prepareSearch().setSize(0).setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
}
+ private Map<String, long[]> assertAndCapturePrimaryTerms(Map<String, long[]> previousTerms) {
+ if (previousTerms == null) {
+ previousTerms = new HashMap<>();
+ }
+ final Map<String, long[]> result = new HashMap<>();
+ final ClusterState state = client().admin().cluster().prepareState().get().getState();
+ for (ObjectCursor<IndexMetaData> cursor : state.metaData().indices().values()) {
+ final IndexMetaData indexMetaData = cursor.value;
+ final String index = indexMetaData.getIndex().getName();
+ final long[] previous = previousTerms.get(index);
+ final long[] current = IntStream.range(0, indexMetaData.getNumberOfShards()).mapToLong(indexMetaData::primaryTerm).toArray();
+ if (previous == null) {
+ result.put(index, current);
+ } else {
+ assertThat("number of terms changed for index [" + index + "]", current.length, equalTo(previous.length));
+ for (int shard = 0; shard < current.length; shard++) {
+ assertThat("primary term didn't increase for [" + index + "][" + shard + "]", current[shard], greaterThan(previous[shard]));
+ }
+ result.put(index, current);
+ }
+ }
+
+ return result;
+ }
+
public void testSingleNodeNoFlush() throws Exception {
internalCluster().startNode();
@@ -163,10 +196,14 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Ensure all primaries have been started");
ensureYellow();
}
+
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
for (int i = 0; i <= randomInt(10); i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), value1Docs + value2Docs);
@@ -180,6 +217,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
for (int i = 0; i <= randomInt(10); i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), value1Docs + value2Docs);
@@ -201,10 +239,13 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
ensureYellow("test"); // wait for primary allocations here otherwise if we have a lot of shards we might have a
// shard that is still in post recovery when we restart and the ensureYellow() below will timeout
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
internalCluster().fullRestart();
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 2);
@@ -214,6 +255,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 2);
@@ -236,6 +278,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 2);
}
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
@@ -251,6 +295,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("Running Cluster Health (wait for the shards to startup)");
ensureGreen();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet(), 2);
@@ -276,6 +321,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID();
assertThat(metaDataUuid, not(equalTo("_na_")));
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
logger.info("--> closing first node, and indexing more data to the second node");
internalCluster().fullRestart(new RestartCallback() {
@@ -315,6 +362,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
logger.info("--> running cluster_health (wait for the shards to startup)");
ensureGreen();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(), equalTo(metaDataUuid));
@@ -386,11 +434,15 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
.setTransientSettings(settingsBuilder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE))
.get();
+
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
logger.info("--> full cluster restart");
internalCluster().fullRestart();
logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? "" : "second ");
ensureGreen();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
if (useSyncIds) {
assertSyncIdsNotNull();
@@ -445,6 +497,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
internalCluster().startNode(settingsBuilder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir()).build());
ensureGreen();
+ Map<String, long[]> primaryTerms = assertAndCapturePrimaryTerms(null);
+
internalCluster().fullRestart(new RestartCallback() {
@@ -455,6 +509,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
});
ensureYellow();
+ primaryTerms = assertAndCapturePrimaryTerms(primaryTerms);
assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1);
diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
index 3a4020e410..97258b12a3 100644
--- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java
@@ -187,12 +187,13 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
return ThreadPool.Names.GENERIC;
}
};
+
latch.get().await();
latch.set(new CountDownLatch(1));
assertEquals(1, count.get());
- latch2.get().countDown();
- latch2.set(new CountDownLatch(1));
-
+ // swap in the new latch before releasing the old one; otherwise a fast thread could fetch the stale latch and run
+ // the task twice, since the task throws a random exception and the schedule interval is only 1ms
+ latch2.getAndSet(new CountDownLatch(1)).countDown();
latch.get().await();
assertEquals(2, count.get());
task.close();
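
[Note] The latch fix above is worth spelling out. The old sequence, countDown() then set(new CountDownLatch(1)), leaves a window in which a fast worker passes the old latch, loops around, fetches the same already-open latch, and runs the task a second time. getAndSet exchanges the latch and then releases the old one, so each release lets at most one pass through. In isolation:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicReference;

    final class LatchSwap {
        private final AtomicReference<CountDownLatch> latch =
                new AtomicReference<>(new CountDownLatch(1));

        // install the fresh latch first, then open the old one: a worker that
        // loops around immediately blocks on the new latch
        void releaseOnce() {
            latch.getAndSet(new CountDownLatch(1)).countDown();
        }

        void awaitTurn() throws InterruptedException {
            latch.get().await();
        }
    }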
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
index e3a81d2a06..477e48ae5d 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
@@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.test.ESSingleNodeTestCase;
import java.io.IOException;
-import java.util.List;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -245,7 +244,17 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
// original mapping not modified
assertEquals(mapping, serialize(mapper));
// but we have an update
- assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"text\"}}}}", serialize(update));
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("foo")
+ .field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject().string(), serialize(update));
}
public void testIncremental() throws Exception {
@@ -267,7 +276,14 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
// foo is NOT in the update
- .startObject("bar").field("type", "text").endObject()
+ .startObject("bar").field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
.endObject().endObject().string(), serialize(update));
}
@@ -287,8 +303,22 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
- .startObject("bar").field("type", "text").endObject()
- .startObject("foo").field("type", "text").endObject()
+ .startObject("bar").field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("foo").field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
.endObject().endObject().string(), serialize(update));
}
@@ -308,7 +338,9 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
- .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").endObject().endObject().endObject().endObject().endObject()
+ .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text")
+ .startObject("fields").startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject()
+ .endObject().endObject().endObject().endObject().endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
@@ -328,7 +360,15 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
- .startObject("foo").field("type", "text").endObject()
+ .startObject("foo")
+ .field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
@@ -348,7 +388,9 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
assertEquals(mapping, serialize(mapper));
// but we have an update
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
- .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").endObject().endObject().endObject().endObject().endObject()
+ .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "text").startObject("fields")
+ .startObject("keyword").field("type", "keyword").field("ignore_above", 256).endObject()
+ .endObject().endObject().endObject().endObject().endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
}
@@ -369,7 +411,14 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
assertEquals(mapping, serialize(mapper));
assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("foo").startObject("properties")
- .startObject("bar").field("type", "text").endObject()
+ .startObject("bar").field("type", "text")
+ .startObject("fields")
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("ignore_above", 256)
+ .endObject()
+ .endObject()
+ .endObject()
.startObject("baz").field("type", "long").endObject()
.endObject().endObject()
.endObject().endObject().endObject().string(), serialize(update));
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java
index 2646d94471..6fc4c4a02a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java
@@ -102,7 +102,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertFieldNames(set("a", "b", "b.c", "_uid", "_type", "_version", "_source", "_all"), doc);
+ assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_type", "_version", "_source", "_all"), doc);
}
public void testExplicitEnabled() throws Exception {
@@ -119,7 +119,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.bytes());
- assertFieldNames(set("field", "_uid", "_type", "_version", "_source", "_all"), doc);
+ assertFieldNames(set("field", "field.keyword", "_uid", "_type", "_version", "_source", "_all"), doc);
}
public void testDisabled() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/TypeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/TypeFieldTypeTests.java
index c01b04584e..91216983b7 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/internal/TypeFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/TypeFieldTypeTests.java
@@ -18,6 +18,23 @@
*/
package org.elasticsearch.index.mapper.internal;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
import org.elasticsearch.index.mapper.FieldTypeTestCase;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -26,4 +43,36 @@ public class TypeFieldTypeTests extends FieldTypeTestCase {
protected MappedFieldType createDefaultFieldType() {
return new TypeFieldMapper.TypeFieldType();
}
+
+ public void testTermQuery() throws Exception {
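+        // a query on _type should rewrite to a match-all when the index contains a single type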
+ Directory dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+ Document doc = new Document();
+ StringField type = new StringField(TypeFieldMapper.NAME, "my_type", Store.NO);
+ doc.add(type);
+ w.addDocument(doc);
+ w.addDocument(doc);
+ IndexReader reader = DirectoryReader.open(w);
+
+ TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType();
+ ft.setName(TypeFieldMapper.NAME);
+ Query query = ft.termQuery("my_type", null);
+
+ assertEquals(new MatchAllDocsQuery(), query.rewrite(reader));
+
+ // Make sure that Lucene actually simplifies the query when there is a single type
+ Query userQuery = new PhraseQuery("body", "quick", "fox");
+ Query filteredQuery = new BooleanQuery.Builder().add(userQuery, Occur.MUST).add(query, Occur.FILTER).build();
+ Query rewritten = new IndexSearcher(reader).rewrite(filteredQuery);
+ assertEquals(userQuery, rewritten);
+
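+        // index a document with a second type so the query can no longer rewrite to a match-all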
+ type.setStringValue("my_type2");
+ w.addDocument(doc);
+ reader.close();
+ reader = DirectoryReader.open(w);
+
+ assertEquals(new ConstantScoreQuery(new TermQuery(new Term(TypeFieldMapper.NAME, "my_type"))), query.rewrite(reader));
+
+ IOUtils.close(reader, w, dir);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java b/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java
index f17a4fc664..572eb1b46c 100644
--- a/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/index/percolator/ExtractQueryTermsServiceTests.java
@@ -24,6 +24,8 @@ import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.queries.BlendedTermQuery;
+import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -39,16 +41,13 @@ import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
-import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
public class ExtractQueryTermsServiceTests extends ESTestCase {
@@ -218,6 +217,31 @@ public class ExtractQueryTermsServiceTests extends ESTestCase {
assertThat(terms.get(0).bytes(), equalTo(termQuery1.getTerm().bytes()));
}
+ public void testExtractQueryMetadata_commonTermsQuery() {
+ CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 100);
+ commonTermsQuery.add(new Term("_field", "_term1"));
+ commonTermsQuery.add(new Term("_field", "_term2"));
+ List<Term> terms = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(commonTermsQuery));
+ Collections.sort(terms);
+ assertThat(terms.size(), equalTo(2));
+ assertThat(terms.get(0).field(), equalTo("_field"));
+ assertThat(terms.get(0).text(), equalTo("_term1"));
+ assertThat(terms.get(1).field(), equalTo("_field"));
+ assertThat(terms.get(1).text(), equalTo("_term2"));
+ }
+
+ public void testExtractQueryMetadata_blendedTermQuery() {
+ Term[] terms = new Term[]{new Term("_field", "_term1"), new Term("_field", "_term2")};
+        BlendedTermQuery blendedTermQuery = BlendedTermQuery.booleanBlendedQuery(terms, false);
+        List<Term> result = new ArrayList<>(ExtractQueryTermsService.extractQueryTerms(blendedTermQuery));
+ Collections.sort(result);
+ assertThat(result.size(), equalTo(2));
+ assertThat(result.get(0).field(), equalTo("_field"));
+ assertThat(result.get(0).text(), equalTo("_term1"));
+ assertThat(result.get(1).field(), equalTo("_field"));
+ assertThat(result.get(1).text(), equalTo("_term2"));
+ }
+
public void testExtractQueryMetadata_unsupportedQuery() {
TermRangeQuery termRangeQuery = new TermRangeQuery("_field", null, null, true, false);
@@ -229,7 +253,7 @@ public class ExtractQueryTermsServiceTests extends ESTestCase {
}
TermQuery termQuery1 = new TermQuery(new Term("_field", "_term"));
- BooleanQuery.Builder builder = new BooleanQuery.Builder();;
+ BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.add(termQuery1, BooleanClause.Occur.SHOULD);
builder.add(termRangeQuery, BooleanClause.Occur.SHOULD);
BooleanQuery bq = builder.build();
diff --git a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
index 4715574b5a..675ad954e0 100644
--- a/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/HasChildQueryBuilderTests.java
@@ -275,12 +275,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
assertThat(termQuery.getTerm().bytes(), equalTo(ids[0]));
//check the type filter
assertThat(booleanQuery.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.FILTER));
- assertThat(booleanQuery.clauses().get(1).getQuery(), instanceOf(ConstantScoreQuery.class));
- ConstantScoreQuery typeConstantScoreQuery = (ConstantScoreQuery) booleanQuery.clauses().get(1).getQuery();
- assertThat(typeConstantScoreQuery.getQuery(), instanceOf(TermQuery.class));
- TermQuery typeTermQuery = (TermQuery) typeConstantScoreQuery.getQuery();
- assertThat(typeTermQuery.getTerm().field(), equalTo(TypeFieldMapper.NAME));
- assertThat(typeTermQuery.getTerm().text(), equalTo(type));
+ assertEquals(new TypeFieldMapper.TypeQuery(new BytesRef(type)), booleanQuery.clauses().get(1).getQuery());
}
/**
diff --git a/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java
index 347802d377..cbcfba55b9 100644
--- a/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/PercolatorQueryTests.java
@@ -32,9 +32,10 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.queries.BlendedTermQuery;
+import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PhraseQuery;
@@ -59,8 +60,6 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.not;
public class PercolatorQueryTests extends ESTestCase {
@@ -179,36 +178,30 @@ public class PercolatorQueryTests extends ESTestCase {
MemoryIndex memoryIndex = new MemoryIndex();
String id = Integer.toString(i);
memoryIndex.addField("field", id, new WhitespaceAnalyzer());
- IndexSearcher percolateSearcher = memoryIndex.createSearcher();
-
- PercolatorQuery.Builder builder1 = new PercolatorQuery.Builder(
- "docType",
- queryRegistry,
- new BytesArray("{}"),
- percolateSearcher,
- new MatchAllDocsQuery()
- );
- // enables the optimization that prevents queries from being evaluated that don't match
- builder1.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
- TopDocs topDocs1 = shardSearcher.search(builder1.build(), 10);
-
- PercolatorQuery.Builder builder2 = new PercolatorQuery.Builder(
- "docType",
- queryRegistry,
- new BytesArray("{}"),
- percolateSearcher,
- new MatchAllDocsQuery()
- );
- TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10);
-
- assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits));
- assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length));
- for (int j = 0; j < topDocs1.scoreDocs.length; j++) {
- assertThat(topDocs1.scoreDocs[j].doc, equalTo(topDocs2.scoreDocs[j].doc));
- }
+ duelRun(memoryIndex, shardSearcher);
}
}
+ public void testDuelSpecificQueries() throws Exception {
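+        // percolate queries with specialized term extraction (common terms, blended terms) and
+        // verify the optimized and unoptimized percolator paths return the same matches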
+ CommonTermsQuery commonTermsQuery = new CommonTermsQuery(BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, 128);
+ commonTermsQuery.add(new Term("field", "quick"));
+ commonTermsQuery.add(new Term("field", "brown"));
+ commonTermsQuery.add(new Term("field", "fox"));
+ addPercolatorQuery("_id1", commonTermsQuery);
+
+ BlendedTermQuery blendedTermQuery = BlendedTermQuery.booleanBlendedQuery(new Term[]{new Term("field", "quick"),
+ new Term("field", "brown"), new Term("field", "fox")}, false);
+ addPercolatorQuery("_id2", blendedTermQuery);
+
+ indexWriter.close();
+ directoryReader = DirectoryReader.open(directory);
+ IndexSearcher shardSearcher = newSearcher(directoryReader);
+
+ MemoryIndex memoryIndex = new MemoryIndex();
+ memoryIndex.addField("field", "the quick brown fox jumps over the lazy dog", new WhitespaceAnalyzer());
+ duelRun(memoryIndex, shardSearcher);
+ }
+
void addPercolatorQuery(String id, Query query, String... extraFields) throws IOException {
queries.put(id, query);
ParseContext.Document document = new ParseContext.Document();
@@ -222,6 +215,35 @@ public class PercolatorQueryTests extends ESTestCase {
indexWriter.addDocument(document);
}
+ private void duelRun(MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException {
+ IndexSearcher percolateSearcher = memoryIndex.createSearcher();
+ PercolatorQuery.Builder builder1 = new PercolatorQuery.Builder(
+ "docType",
+ queryRegistry,
+ new BytesArray("{}"),
+ percolateSearcher,
+ new MatchAllDocsQuery()
+ );
+ // enables the optimization that prevents queries from being evaluated that don't match
+ builder1.extractQueryTermsQuery(EXTRACTED_TERMS_FIELD_NAME, UNKNOWN_QUERY_FIELD_NAME);
+ TopDocs topDocs1 = shardSearcher.search(builder1.build(), 10);
+
+ PercolatorQuery.Builder builder2 = new PercolatorQuery.Builder(
+ "docType",
+ queryRegistry,
+ new BytesArray("{}"),
+ percolateSearcher,
+ new MatchAllDocsQuery()
+ );
+ TopDocs topDocs2 = shardSearcher.search(builder2.build(), 10);
+
+ assertThat(topDocs1.totalHits, equalTo(topDocs2.totalHits));
+ assertThat(topDocs1.scoreDocs.length, equalTo(topDocs2.scoreDocs.length));
+ for (int j = 0; j < topDocs1.scoreDocs.length; j++) {
+ assertThat(topDocs1.scoreDocs[j].doc, equalTo(topDocs2.scoreDocs[j].doc));
+ }
+ }
+
private final static class CustomQuery extends Query {
private final Term term;
diff --git a/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java
index 4621390e5f..674b6aed78 100644
--- a/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java
@@ -19,17 +19,12 @@
package org.elasticsearch.index.query;
-import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import java.io.IOException;
-import static org.hamcrest.Matchers.either;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
-
public class TypeQueryBuilderTests extends AbstractQueryTestCase<TypeQueryBuilder> {
@Override
@@ -39,14 +34,7 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase<TypeQueryBuilde
@Override
protected void doAssertLuceneQuery(TypeQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
- assertThat(query, either(instanceOf(TermQuery.class)).or(instanceOf(ConstantScoreQuery.class)));
- if (query instanceof ConstantScoreQuery) {
- query = ((ConstantScoreQuery) query).getQuery();
- assertThat(query, instanceOf(TermQuery.class));
- }
- TermQuery termQuery = (TermQuery) query;
- assertThat(termQuery.getTerm().field(), equalTo(TypeFieldMapper.NAME));
- assertThat(termQuery.getTerm().text(), equalTo(queryBuilder.type()));
+ assertEquals(new TypeFieldMapper.TypeQuery(new BytesRef(queryBuilder.type())), query);
}
public void testIllegalArgument() {
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 72f58f104e..1839df5dd3 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -37,18 +37,22 @@ import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.TransportIndexAction;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
@@ -59,7 +63,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -119,6 +122,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+import static org.elasticsearch.common.lucene.Lucene.cleanLuceneIndex;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -127,6 +131,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@@ -168,6 +173,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test");
ensureGreen();
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+
ClusterService cs = getInstanceFromNode(ClusterService.class);
final Index index = cs.state().metaData().index("test").getIndex();
Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
@@ -295,31 +301,133 @@ public class IndexShardTests extends ESSingleNodeTestCase {
// expected
}
try {
- indexShard.acquireReplicaOperationLock();
+ indexShard.acquireReplicaOperationLock(indexShard.getPrimaryTerm());
fail("we should not be able to increment anymore");
} catch (IndexShardClosedException e) {
// expected
}
}
- public void testIndexOperationsCounter() throws InterruptedException, ExecutionException, IOException {
+ public void testOperationLocksOnPrimaryShards() throws InterruptedException, ExecutionException, IOException {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
IndexShard indexShard = indexService.getShardOrNull(0);
+ long primaryTerm = indexShard.getPrimaryTerm();
+
+ ShardRouting temp = indexShard.routingEntry();
+ final ShardRouting newPrimaryShardRouting;
+ if (randomBoolean()) {
+ // relocation target
+ newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), "other node",
+ true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(temp.allocationId()));
+ } else if (randomBoolean()) {
+ // simulate promotion
+ ShardRouting newReplicaShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), null,
+ false, ShardRoutingState.STARTED, temp.allocationId());
+ indexShard.updateRoutingEntry(newReplicaShardRouting, false);
+ primaryTerm = primaryTerm + 1;
+ indexShard.updatePrimaryTerm(primaryTerm);
+ newPrimaryShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), null,
+ true, ShardRoutingState.STARTED, temp.allocationId());
+ } else {
+ newPrimaryShardRouting = temp;
+ }
+ indexShard.updateRoutingEntry(newPrimaryShardRouting, false);
+
assertEquals(0, indexShard.getActiveOperationsCount());
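+        // a primary shard only accepts replica operation locks while it is a relocation target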
+ if (newPrimaryShardRouting.isRelocationTarget() == false) {
+ try {
+ indexShard.acquireReplicaOperationLock(primaryTerm);
+ fail("shard shouldn't accept operations as replica");
+ } catch (IllegalStateException ignored) {
+
+ }
+ }
Releasable operation1 = indexShard.acquirePrimaryOperationLock();
assertEquals(1, indexShard.getActiveOperationsCount());
Releasable operation2 = indexShard.acquirePrimaryOperationLock();
assertEquals(2, indexShard.getActiveOperationsCount());
+
+ Releasables.close(operation1, operation2);
+ assertEquals(0, indexShard.getActiveOperationsCount());
+ }
+
+ public void testOperationLocksOnReplicaShards() throws InterruptedException, ExecutionException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
+ IndexShard indexShard = indexService.getShardOrNull(0);
+ long primaryTerm = indexShard.getPrimaryTerm();
+
+        // ugly hack to allow the shard to operate as a replica
+ final ShardRouting temp = indexShard.routingEntry();
+ final ShardRouting newShardRouting;
+ switch (randomInt(2)) {
+ case 0:
+ // started replica
+ newShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), null,
+ false, ShardRoutingState.STARTED, AllocationId.newRelocation(temp.allocationId()));
+
+ indexShard.updateRoutingEntry(newShardRouting, false);
+ break;
+ case 1:
+ // initializing replica / primary
+ final boolean relocating = randomBoolean();
+ newShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(),
+ relocating ? "sourceNode" : null,
+ relocating ? randomBoolean() : false,
+ ShardRoutingState.INITIALIZING,
+ relocating ? AllocationId.newRelocation(temp.allocationId()) : temp.allocationId());
+ indexShard.updateRoutingEntry(newShardRouting, false);
+ break;
+ case 2:
+ // relocation source
+ newShardRouting = TestShardRouting.newShardRouting(temp.index(), temp.id(), temp.currentNodeId(), "otherNode",
+ false, ShardRoutingState.RELOCATING, AllocationId.newRelocation(temp.allocationId()));
+ indexShard.updateRoutingEntry(newShardRouting, false);
+ indexShard.relocated("test");
+ break;
+ default:
+ throw new UnsupportedOperationException("get your numbers straight");
+
+ }
+ logger.info("updated shard routing to {}", newShardRouting);
+
+ assertEquals(0, indexShard.getActiveOperationsCount());
+ if (newShardRouting.primary() == false) {
+ try {
+ indexShard.acquirePrimaryOperationLock();
+ fail("shard shouldn't accept primary ops");
+ } catch (IllegalStateException ignored) {
+
+ }
+ }
+
+ Releasable operation1 = indexShard.acquireReplicaOperationLock(primaryTerm);
+ assertEquals(1, indexShard.getActiveOperationsCount());
+ Releasable operation2 = indexShard.acquireReplicaOperationLock(primaryTerm);
+ assertEquals(2, indexShard.getActiveOperationsCount());
+
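+        // operation locks with a stale primary term must be rejected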
+ try {
+ indexShard.acquireReplicaOperationLock(primaryTerm - 1);
+ fail("you can not increment the operation counter with an older primary term");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("operation term"));
+ assertThat(e.getMessage(), containsString("too old"));
+ }
+
+        // but you can increment with a newer primary term
+ indexShard.acquireReplicaOperationLock(primaryTerm + 1 + randomInt(20)).close();
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
}
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
@@ -364,14 +472,14 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertTrue(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.REQUEST);
assertNoFailures(client().prepareBulk()
- .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
- .add(client().prepareDelete("test", "bar", "1")).get());
+ .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "1")).get());
assertFalse(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.ASYNC);
assertNoFailures(client().prepareBulk()
- .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
- .add(client().prepareDelete("test", "bar", "3")).get());
+ .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "3")).get());
setDurability(shard, Translog.Durability.REQUEST);
assertTrue(shard.getEngine().getTranslog().syncNeeded());
}
@@ -384,7 +492,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testMinimumCompatVersion() {
Version versionCreated = VersionUtils.randomVersion(random());
assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, SETTING_VERSION_CREATED, versionCreated.id));
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, SETTING_VERSION_CREATED, versionCreated.id));
client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
@@ -398,7 +506,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testUpdatePriority() {
assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(IndexMetaData.SETTING_PRIORITY, 200));
+ .setSettings(IndexMetaData.SETTING_PRIORITY, 200));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400).build()).get();
@@ -434,8 +542,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
logger.info("--> idxPath: [{}]", idxPath);
Settings idxSettings = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, idxPath)
- .build();
+ .put(IndexMetaData.SETTING_DATA_PATH, idxPath)
+ .build();
createIndex("test", idxSettings);
ensureGreen("test");
client().prepareIndex("test", "bar", "1").setSource("{}").setRefresh(true).get();
@@ -447,7 +555,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testExpectedShardSizeIsPresent() throws InterruptedException {
assertAcked(client().admin().indices().prepareCreate("test")
- .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
for (int i = 0; i < 50; i++) {
client().prepareIndex("test", "test").setSource("{}").get();
}
@@ -475,11 +583,11 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IOUtils.rm(endDir);
Settings sb = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
- .build();
+ .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
+ .build();
Settings sb2 = Settings.builder()
- .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
- .build();
+ .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
+ .build();
logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
createIndex(INDEX, sb);
@@ -510,9 +618,9 @@ public class IndexShardTests extends ESSingleNodeTestCase {
logger.info("--> updating settings...");
client().admin().indices().prepareUpdateSettings(INDEX)
- .setSettings(sb2)
- .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
- .get();
+ .setSettings(sb2)
+ .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
+ .get();
assert Files.exists(startDir) == false : "start dir shouldn't exist";
@@ -642,7 +750,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
try {
shard.index(index);
fail();
- }catch (IllegalIndexShardStateException e){
+ } catch (IllegalIndexShardStateException e) {
}
@@ -655,7 +763,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
try {
shard.delete(delete);
fail();
- }catch (IllegalIndexShardStateException e){
+ } catch (IllegalIndexShardStateException e) {
}
@@ -692,7 +800,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
long size = shard.getEngine().getTranslog().sizeInBytes();
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
- .build()).get();
+ .build()).get();
client().prepareDelete("test", "test", "2").get();
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
assertBusy(() -> { // this is async
@@ -877,7 +985,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
- localNode));
+ localNode));
assertTrue(newShard.recoverFromStore(localNode));
assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperations());
@@ -890,7 +998,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertHitCount(response, 0);
}
- public void testFailIfIndexNotPresentInRecoverFromStore() throws IOException {
+ public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
@@ -907,7 +1015,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
Store store = shard.store();
store.incRef();
test.removeShard(0, "b/c simon says so");
- Lucene.cleanLuceneIndex(store.directory());
+ cleanLuceneIndex(store.directory());
store.decRef();
ShardRoutingHelper.reinit(routing);
IndexShard newShard = test.createShard(routing);
@@ -940,7 +1048,12 @@ public class IndexShardTests extends ESSingleNodeTestCase {
newShard.updateRoutingEntry(routing, true);
SearchResponse response = client().prepareSearch().get();
assertHitCount(response, 0);
- client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(true).get();
+ // we can't issue this request through a client because of the inconsistencies we created with the cluster state
+ // doing it directly instead
+ IndexRequest request = client().prepareIndex("test", "test", "0").setSource("{}").request();
+ request.process(MetaData.builder().put(test.getMetaData(), false).build(), null, false, "test");
+ TransportIndexAction.executeIndexRequestOnPrimary(request, newShard, null);
+ newShard.refresh("test");
assertHitCount(client().prepareSearch().get(), 1);
}
@@ -999,7 +1112,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
@Override
public void restore(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
try {
- Lucene.cleanLuceneIndex(targetStore.directory());
+ cleanLuceneIndex(targetStore.directory());
for (String file : sourceStore.directory().listAll()) {
if (file.equals("write.lock") || file.startsWith("extra")) {
continue;
@@ -1205,12 +1318,12 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testIndexingBufferDuringInternalRecovery() throws IOException {
createIndex("index");
client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
- .startObject("testtype")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
+ .startObject("testtype")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "text")
+ .endObject()
+ .endObject().endObject().endObject()).get();
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("index"));
@@ -1234,12 +1347,12 @@ public class IndexShardTests extends ESSingleNodeTestCase {
public void testIndexingBufferDuringPeerRecovery() throws IOException {
createIndex("index");
client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
- .startObject("testtype")
- .startObject("properties")
- .startObject("foo")
- .field("type", "text")
- .endObject()
- .endObject().endObject().endObject()).get();
+ .startObject("testtype")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "text")
+ .endObject()
+ .endObject().endObject().endObject()).get();
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("index"));
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
index d105c98f4a..322ed269bc 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java
@@ -40,6 +40,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
private static final int RELOCATION_COUNT = 25;
+ @TestLogging("_root:DEBUG,action.delete:TRACE,action.index:TRACE,index.shard:TRACE,cluster.service:TRACE")
public void testPrimaryRelocationWhileIndexing() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3));
client().admin().indices().prepareCreate("test")
diff --git a/core/src/test/java/org/elasticsearch/ingest/processor/TrackingResultProcessorTests.java b/core/src/test/java/org/elasticsearch/ingest/processor/TrackingResultProcessorTests.java
new file mode 100644
index 0000000000..e53eec56cf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/ingest/processor/TrackingResultProcessorTests.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ingest.processor;
+
+import org.elasticsearch.action.ingest.SimulateProcessorResult;
+import org.elasticsearch.ingest.TestProcessor;
+import org.elasticsearch.ingest.core.CompoundProcessor;
+import org.elasticsearch.ingest.core.IngestDocument;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.ingest.core.CompoundProcessor.ON_FAILURE_MESSAGE_FIELD;
+import static org.elasticsearch.ingest.core.CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD;
+import static org.elasticsearch.ingest.core.CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD;
+import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class TrackingResultProcessorTests extends ESTestCase {
+
+ private IngestDocument ingestDocument;
+ private List<SimulateProcessorResult> resultList;
+
+ @Before
+ public void init() {
+ ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
+ resultList = new ArrayList<>();
+ }
+
+ public void testActualProcessor() throws Exception {
+ TestProcessor actualProcessor = new TestProcessor(ingestDocument -> {});
+ TrackingResultProcessor trackingProcessor = new TrackingResultProcessor(actualProcessor, resultList);
+ trackingProcessor.execute(ingestDocument);
+
+ SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);
+
+ assertThat(actualProcessor.getInvokedCounter(), equalTo(1));
+ assertThat(resultList.size(), equalTo(1));
+
+ assertThat(resultList.get(0).getIngestDocument(), equalTo(expectedResult.getIngestDocument()));
+ assertThat(resultList.get(0).getFailure(), nullValue());
+ assertThat(resultList.get(0).getProcessorTag(), equalTo(expectedResult.getProcessorTag()));
+ }
+
+ public void testActualCompoundProcessorWithoutOnFailure() throws Exception {
+ RuntimeException exception = new RuntimeException("processor failed");
+ TestProcessor testProcessor = new TestProcessor(ingestDocument -> { throw exception; });
+ CompoundProcessor actualProcessor = new CompoundProcessor(testProcessor);
+ CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);
+
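+        // without an on-failure processor the exception propagates to the caller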
+ try {
+ trackingProcessor.execute(ingestDocument);
+ } catch (Exception e) {
+ assertThat(e.getMessage(), equalTo(exception.getMessage()));
+ }
+
+ SimulateProcessorResult expectedFirstResult = new SimulateProcessorResult(testProcessor.getTag(), ingestDocument);
+ assertThat(testProcessor.getInvokedCounter(), equalTo(1));
+ assertThat(resultList.size(), equalTo(1));
+ assertThat(resultList.get(0).getIngestDocument(), nullValue());
+ assertThat(resultList.get(0).getFailure(), equalTo(exception));
+ assertThat(resultList.get(0).getProcessorTag(), equalTo(expectedFirstResult.getProcessorTag()));
+ }
+
+ public void testActualCompoundProcessorWithOnFailure() throws Exception {
+ RuntimeException exception = new RuntimeException("fail");
+ TestProcessor failProcessor = new TestProcessor("fail", "test", ingestDocument -> { throw exception; });
+ TestProcessor onFailureProcessor = new TestProcessor("success", "test", ingestDocument -> {});
+ CompoundProcessor actualProcessor = new CompoundProcessor(
+ Arrays.asList(new CompoundProcessor(
+ Arrays.asList(failProcessor, onFailureProcessor),
+ Arrays.asList(onFailureProcessor, failProcessor))),
+ Arrays.asList(onFailureProcessor));
+ CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);
+ trackingProcessor.execute(ingestDocument);
+
+ SimulateProcessorResult expectedFailResult = new SimulateProcessorResult(failProcessor.getTag(), ingestDocument);
+ SimulateProcessorResult expectedSuccessResult = new SimulateProcessorResult(onFailureProcessor.getTag(), ingestDocument);
+
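+        // expected flow: the inner "fail" processor throws, its on-failure chain runs "success" and
+        // then "fail" again, and that second failure is handled by the outer on-failure processor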
+ assertThat(failProcessor.getInvokedCounter(), equalTo(2));
+ assertThat(onFailureProcessor.getInvokedCounter(), equalTo(2));
+ assertThat(resultList.size(), equalTo(4));
+
+ assertThat(resultList.get(0).getIngestDocument(), nullValue());
+ assertThat(resultList.get(0).getFailure(), equalTo(exception));
+ assertThat(resultList.get(0).getProcessorTag(), equalTo(expectedFailResult.getProcessorTag()));
+
+ Map<String, String> metadata = resultList.get(1).getIngestDocument().getIngestMetadata();
+ assertThat(metadata.get(ON_FAILURE_MESSAGE_FIELD), equalTo("fail"));
+ assertThat(metadata.get(ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("test"));
+ assertThat(metadata.get(ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("fail"));
+ assertThat(resultList.get(1).getFailure(), nullValue());
+ assertThat(resultList.get(1).getProcessorTag(), equalTo(expectedSuccessResult.getProcessorTag()));
+
+ assertThat(resultList.get(2).getIngestDocument(), nullValue());
+ assertThat(resultList.get(2).getFailure(), equalTo(exception));
+ assertThat(resultList.get(2).getProcessorTag(), equalTo(expectedFailResult.getProcessorTag()));
+
+ metadata = resultList.get(3).getIngestDocument().getIngestMetadata();
+ assertThat(metadata.get(ON_FAILURE_MESSAGE_FIELD), equalTo("fail"));
+ assertThat(metadata.get(ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("compound"));
+ assertThat(metadata.get(ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("CompoundProcessor-fail-success-success-fail"));
+ assertThat(resultList.get(3).getFailure(), nullValue());
+ assertThat(resultList.get(3).getProcessorTag(), equalTo(expectedSuccessResult.getProcessorTag()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
index 4a12072da4..0d65bb2bf6 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java
@@ -252,9 +252,8 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>>
try (BytesStreamOutput output = new BytesStreamOutput()) {
testAgg.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
- AggregatorBuilder prototype = (AggregatorBuilder) namedWriteableRegistry.getPrototype(AggregatorBuilder.class,
- testAgg.getWriteableName());
- AggregatorBuilder deserializedQuery = prototype.readFrom(in);
+ AggregatorBuilder deserializedQuery = namedWriteableRegistry.getReader(AggregatorBuilder.class, testAgg.getWriteableName())
+ .read(in);
assertEquals(deserializedQuery, testAgg);
assertEquals(deserializedQuery.hashCode(), testAgg.hashCode());
assertNotSame(deserializedQuery, testAgg);
@@ -294,10 +293,8 @@ public abstract class BaseAggregationTestCase<AB extends AggregatorBuilder<AB>>
try (BytesStreamOutput output = new BytesStreamOutput()) {
agg.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
- AggregatorBuilder prototype = (AggregatorBuilder) namedWriteableRegistry.getPrototype(AggregatorBuilder.class,
- agg.getWriteableName());
@SuppressWarnings("unchecked")
- AB secondAgg = (AB) prototype.readFrom(in);
+ AB secondAgg = (AB) namedWriteableRegistry.getReader(AggregatorBuilder.class, agg.getWriteableName()).read(in);
return secondAgg;
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
index dbe2714d05..fa8caaf0ed 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
@@ -564,6 +564,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
.startObject("properties")
.startObject("field1").field("type", "text").field("store", true).field("index_options", "offsets")
.field("term_vector", "with_positions_offsets").endObject()
+ .startObject("field2").field("type", "text").endObject()
.endObject().endObject().endObject()));
ensureGreen();
diff --git a/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java
index 55f2ab8012..fc2a6d02b8 100644
--- a/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/percolator/PercolatorQuerySearchIT.java
@@ -21,15 +21,17 @@ package org.elasticsearch.search.percolator;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.percolator.PercolatorFieldMapper;
+import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.sort.SortOrder;
-import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESSingleNodeTestCase;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.percolatorQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.hamcrest.Matchers.equalTo;
@@ -85,6 +87,32 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
}
+ public void testPercolatorSpecificQueries() throws Exception {
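+        // store a common terms query and a cross_fields multi match query and verify both match the document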
+ createIndex("test", client().admin().indices().prepareCreate("test")
+ .addMapping("type", "field1", "type=text", "field2", "type=text")
+ );
+
+ client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", commonTermsQuery("field1", "quick brown fox")).endObject())
+ .get();
+ client().prepareIndex("test", PercolatorFieldMapper.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", multiMatchQuery("quick brown fox", "field1", "field2")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)).endObject())
+ .get();
+ client().admin().indices().prepareRefresh().get();
+
+ BytesReference source = jsonBuilder().startObject()
+ .field("field1", "the quick brown fox jumps over the lazy dog")
+ .field("field2", "the quick brown fox falls down into the well")
+ .endObject().bytes();
+ SearchResponse response = client().prepareSearch()
+ .setQuery(percolatorQuery("type", source))
+ .get();
+ assertHitCount(response, 2);
+ assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+ }
+
public void testPercolatorQueryWithHighlighting() throws Exception {
createIndex("test", client().admin().indices().prepareCreate("test")
.addMapping("type", "field1", "type=text")
@@ -125,7 +153,7 @@ public class PercolatorQuerySearchIT extends ESSingleNodeTestCase {
assertThat(searchResponse.getHits().getAt(3).getHighlightFields().get("field1").fragments()[0].string(),
equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
assertThat(searchResponse.getHits().getAt(4).getHighlightFields().get("field1").fragments()[0].string(),
- equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));;
+ equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java
index e0673a64ee..0386dd847f 100644
--- a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescoreBuilderTests.java
@@ -69,7 +69,6 @@ public class QueryRescoreBuilderTests extends ESTestCase {
@BeforeClass
public static void init() {
namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(RescoreBuilder.class, QueryRescorerBuilder.PROTOTYPE);
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry();
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
index 935dfb178b..7cc44f22a2 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java
@@ -100,10 +100,6 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
};
namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScriptSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, FieldSortBuilder.PROTOTYPE);
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry();
}
@@ -118,6 +114,9 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
/** Returns mutated version of original so the returned sort is different in terms of equals/hashcode */
protected abstract T mutate(T original) throws IOException;
+ /** Parse the sort from xContent. Just delegate to the SortBuilder's static fromXContent method. */
+ protected abstract T fromXContent(QueryParseContext context, String fieldName) throws IOException;
+
/**
* Test that creates new sort from a random test sort and checks both for equality
*/
@@ -142,7 +141,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
context.reset(itemParser);
- T parsedItem = testItem.fromXContent(context, elementName);
+ T parsedItem = fromXContent(context, elementName);
assertNotSame(testItem, parsedItem);
assertEquals(testItem, parsedItem);
assertEquals(testItem.hashCode(), parsedItem.hashCode());
@@ -255,10 +254,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
try (BytesStreamOutput output = new BytesStreamOutput()) {
original.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
- T prototype = (T) namedWriteableRegistry.getPrototype(SortBuilder.class,
- original.getWriteableName());
- T copy = prototype.readFrom(in);
- return copy;
+ return (T) namedWriteableRegistry.getReader(SortBuilder.class, original.getWriteableName()).read(in);
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
index f28ec8797c..f4d960aab0 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java
@@ -131,4 +131,9 @@ public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder
}
}
+
+ @Override
+ protected FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
+ return FieldSortBuilder.fromXContent(context, fieldName);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
index e63cb5ad29..995623ed0a 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java
@@ -243,12 +243,8 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
context.reset(itemParser);
- try {
- GeoDistanceSortBuilder.PROTOTYPE.fromXContent(context, "");
- fail("sort mode sum should not be supported");
- } catch (IllegalArgumentException e) {
- // all good
- }
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> GeoDistanceSortBuilder.fromXContent(context, ""));
+ assertEquals("sort_mode [sum] isn't supported for sorting by geo distance", e.getMessage());
}
public void testGeoDistanceSortCanBeParsedFromGeoHash() throws IOException {
@@ -274,7 +270,7 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
context.reset(itemParser);
- GeoDistanceSortBuilder result = GeoDistanceSortBuilder.PROTOTYPE.fromXContent(context, json);
+ GeoDistanceSortBuilder result = GeoDistanceSortBuilder.fromXContent(context, json);
assertEquals("[-19.700583312660456, -2.8225036337971687, "
+ "31.537466906011105, -74.63590376079082, "
+ "43.71844606474042, -5.548660643398762, "
@@ -282,4 +278,9 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanc
+ "-69.44606635719538, 84.25200328230858, "
+ "-39.03717711567879, 44.74099852144718]", Arrays.toString(result.points()));
}
+
+ @Override
+ protected GeoDistanceSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
+ return GeoDistanceSortBuilder.fromXContent(context, fieldName);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java
index 6d605fd625..372d984539 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/ScoreSortBuilderTests.java
@@ -78,7 +78,7 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase<ScoreSortBuilder
parser.nextToken();
context.reset(parser);
- ScoreSortBuilder scoreSort = ScoreSortBuilder.PROTOTYPE.fromXContent(context, "_score");
+ ScoreSortBuilder scoreSort = ScoreSortBuilder.fromXContent(context, "_score");
assertEquals(order, scoreSort.order());
}
@@ -107,4 +107,9 @@ public class ScoreSortBuilderTests extends AbstractSortTestCase<ScoreSortBuilder
assertEquals(SortField.Type.SCORE, sortField.getType());
assertEquals(builder.order() == SortOrder.DESC ? false : true, sortField.getReverse());
}
+
+ @Override
+ protected ScoreSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
+ return ScoreSortBuilder.fromXContent(context, fieldName);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index 5e22667a9b..0e04dc9e4d 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -180,7 +180,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
parser.nextToken();
context.reset(parser);
- ScriptSortBuilder builder = ScriptSortBuilder.PROTOTYPE.fromXContent(context, null);
+ ScriptSortBuilder builder = ScriptSortBuilder.fromXContent(context, null);
assertEquals("doc['field_name'].value * factor", builder.script().getScript());
assertNull(builder.script().getLang());
assertEquals(1.1, builder.script().getParams().get("factor"));
@@ -211,7 +211,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
parser.nextToken();
context.reset(parser);
- ScriptSortBuilder builder = ScriptSortBuilder.PROTOTYPE.fromXContent(context, null);
+ ScriptSortBuilder builder = ScriptSortBuilder.fromXContent(context, null);
assertEquals("doc['field_name'].value * factor", builder.script().getScript());
assertNull(builder.script().getLang());
assertEquals(1.1, builder.script().getParams().get("factor"));
@@ -235,7 +235,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
context.reset(parser);
exceptionRule.expect(ParsingException.class);
exceptionRule.expectMessage("failed to parse field [bad_field]");
- ScriptSortBuilder.PROTOTYPE.fromXContent(context, null);
+ ScriptSortBuilder.fromXContent(context, null);
}
public void testParseBadFieldNameExceptionsOnStartObject() throws IOException {
@@ -251,7 +251,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
context.reset(parser);
exceptionRule.expect(ParsingException.class);
exceptionRule.expectMessage("failed to parse field [bad_field]");
- ScriptSortBuilder.PROTOTYPE.fromXContent(context, null);
+ ScriptSortBuilder.fromXContent(context, null);
}
public void testParseUnexpectedToken() throws IOException {
@@ -267,7 +267,7 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
context.reset(parser);
exceptionRule.expect(ParsingException.class);
exceptionRule.expectMessage("unexpected token [START_ARRAY]");
- ScriptSortBuilder.PROTOTYPE.fromXContent(context, null);
+ ScriptSortBuilder.fromXContent(context, null);
}
/**
@@ -279,4 +279,9 @@ public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuild
exceptionRule.expectMessage("script sort of type [string] doesn't support mode");
builder.sortMode(SortMode.fromString(randomFrom(new String[]{"avg", "median", "sum"})));
}
+
+ @Override
+ protected ScriptSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
+ return ScriptSortBuilder.fromXContent(context, fieldName);
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java
index b8100b5815..7d182a5a8b 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/SortBuilderTests.java
@@ -51,10 +51,6 @@ public class SortBuilderTests extends ESTestCase {
@BeforeClass
public static void init() {
namedWriteableRegistry = new NamedWriteableRegistry();
- namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, ScriptSortBuilder.PROTOTYPE);
- namedWriteableRegistry.registerPrototype(SortBuilder.class, FieldSortBuilder.PROTOTYPE);
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry();
}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortOrderTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortOrderTests.java
index e505ec68e6..2de48decbd 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/SortOrderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/SortOrderTests.java
@@ -38,7 +38,7 @@ public class SortOrderTests extends ESTestCase {
try (BytesStreamOutput out = new BytesStreamOutput()) {
unit.writeTo(out);
try (StreamInput in = StreamInput.wrap(out.bytes())) {
- assertThat("Roundtrip serialisation failed.", SortOrder.readOrderFrom(in), equalTo(unit));
+ assertThat("Roundtrip serialisation failed.", SortOrder.readFromStream(in), equalTo(unit));
}
}
}
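
The rename from readOrderFrom to readFromStream is part of the same move away from prototype-based deserialization: the type deserializes itself through a static factory instead of a pre-registered instance. A minimal sketch of the pattern, assuming the usual ordinal-based enum encoding (not the verified implementation):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import java.io.IOException;

    public enum SortOrder {
        ASC, DESC;

        // Serialization writes the ordinal as a variable-length int.
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(ordinal());
        }

        // Static factory replaces PROTOTYPE.readFrom(in): no prototype needed.
        public static SortOrder readFromStream(StreamInput in) throws IOException {
            int ordinal = in.readVInt();
            if (ordinal < 0 || ordinal >= values().length) {
                throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]");
            }
            return values()[ordinal];
        }
    }
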
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java
index 23d12fcfd0..e2c5411962 100644
--- a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java
+++ b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java
@@ -140,6 +140,6 @@ public class SortParserTests extends ESSingleNodeTestCase {
parser.setParseFieldMatcher(ParseFieldMatcher.STRICT);
parseContext.reset(parser);
parser.nextToken();
- GeoDistanceSortBuilder.PROTOTYPE.fromXContent(parseContext, null);
+ GeoDistanceSortBuilder.fromXContent(parseContext, null);
}
}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java
index 00009d1e76..c3890f0ead 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java
@@ -108,9 +108,7 @@ public abstract class SmoothingModelTestCase extends ESTestCase {
XContentParser parser = XContentHelper.createParser(contentBuilder.bytes());
context.reset(parser);
parser.nextToken(); // go to start token, real parsing would do that in the outer element parser
- SmoothingModel prototype = (SmoothingModel) namedWriteableRegistry.getPrototype(SmoothingModel.class,
- testModel.getWriteableName());
- SmoothingModel parsedModel = prototype.innerFromXContent(context);
+ SmoothingModel parsedModel = testModel.innerFromXContent(context);
assertNotSame(testModel, parsedModel);
assertEquals(testModel, parsedModel);
assertEquals(testModel.hashCode(), parsedModel.hashCode());
@@ -188,9 +186,7 @@ public abstract class SmoothingModelTestCase extends ESTestCase {
try (BytesStreamOutput output = new BytesStreamOutput()) {
original.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
- SmoothingModel prototype = (SmoothingModel) namedWriteableRegistry.getPrototype(SmoothingModel.class,
- original.getWriteableName());
- return prototype.readFrom(in);
+ return namedWriteableRegistry.getReader(SmoothingModel.class, original.getWriteableName()).read(in);
}
}
}
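
The getPrototype(...).readFrom(in) dance collapses into a single reader lookup, as the hunk above shows. A hedged sketch of the roundtrip helper; the register(...) call is an assumption about the reader-based counterpart of the removed registerPrototype, not verified API:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.search.suggest.phrase.SmoothingModel;
    import java.io.IOException;

    // Roundtrip a model through the registry: look the reader up by the model's
    // writeable name instead of materializing a prototype instance.
    static SmoothingModel copyModel(NamedWriteableRegistry registry, SmoothingModel original) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);
            try (StreamInput in = StreamInput.wrap(out.bytes())) {
                return registry.getReader(SmoothingModel.class, original.getWriteableName()).read(in);
            }
        }
    }

    // Registration elsewhere would pair name and constructor reference,
    // along the lines of (assumed signature):
    // registry.register(SmoothingModel.class, Laplace.NAME, Laplace::new);
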
diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java
index 95984da55f..897fa44b59 100644
--- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java
+++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java
@@ -293,8 +293,9 @@ public class RandomShapeGenerator extends RandomGeoGenerator {
xDivisible(yRange.getMax()*10e3)/10e3);
}
+ /** creates a small random rectangle by default to keep shape test runtime in check */
public static Rectangle xRandomRectangle(Random r, Point nearP) {
- return xRandomRectangle(r, nearP, ctx.getWorldBounds(), false);
+ return xRandomRectangle(r, nearP, ctx.getWorldBounds(), true);
}
public static Rectangle xRandomRectangle(Random r, Point nearP, boolean small) {
diff --git a/distribution/src/main/packaging/scripts/postinst b/distribution/src/main/packaging/scripts/postinst
index 61ac5f27cd..4dd5bbf528 100644
--- a/distribution/src/main/packaging/scripts/postinst
+++ b/distribution/src/main/packaging/scripts/postinst
@@ -96,4 +96,8 @@ elif [ "$RESTART_ON_UPGRADE" = "true" ]; then
echo " OK"
fi
+chown -R $ES_USER:$ES_GROUP /var/lib/elasticsearch
+chown -R $ES_USER:$ES_GROUP /var/log/elasticsearch
+chown -R root:$ES_GROUP /etc/elasticsearch
+
${scripts.footer}
diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc
index 4c823119da..d294d1092b 100644
--- a/docs/reference/cluster.asciidoc
+++ b/docs/reference/cluster.asciidoc
@@ -46,3 +46,5 @@ include::cluster/nodes-stats.asciidoc[]
include::cluster/nodes-info.asciidoc[]
include::cluster/nodes-hot-threads.asciidoc[]
+
+include::cluster/allocation-explain.asciidoc[]
diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc
new file mode 100644
index 0000000000..dcca80dd61
--- /dev/null
+++ b/docs/reference/cluster/allocation-explain.asciidoc
@@ -0,0 +1,159 @@
+[[cluster-allocation-explain]]
+== Cluster Allocation Explain API
+
+The cluster allocation explanation API is designed to assist in answering the
+question "why is this shard unassigned?". To explain the allocation of an
+unassigned shard, issue a request like:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain' -d'{
+ "index": "myindex",
+ "shard": 0,
+ "primary": false
+}'
+--------------------------------------------------
+
+Specify the `index` and `shard` id of the shard you would like an explanation
+for, as well as the `primary` flag to indicate whether to explain a primary or
+replica shard.
+
+The response looks like:
+
+[source,js]
+--------------------------------------------------
+{
+ "shard" : {
+ "index" : "myindex",
+ "index_uuid" : "KnW0-zELRs6PK84l0r38ZA",
+ "id" : 0,
+ "primary" : false
+ },
+ "assigned" : false, <1>
+ "unassigned_info" : {
+ "reason" : "INDEX_CREATED", <2>
+ "at" : "2016-03-22T20:04:23.620Z"
+ },
+ "nodes" : { <3>
+ "V-Spi0AyRZ6ZvKbaI3691w" : {
+ "node_name" : "node1",
+ "node_attributes" : { <4>
+ "bar" : "baz"
+ },
+ "final_decision" : "NO", <5>
+ "weight" : 0.06666675, <6>
+ "decisions" : [ { <7>
+ "decider" : "filter",
+ "decision" : "NO",
+ "explanation" : "node does not match index include filters [foo:\"bar\"]"
+ } ]
+ },
+ "Qc6VL8c5RWaw1qXZ0Rg57g" : {
+ "node_name" : "node2",
+ "node_attributes" : {
+ "bar" : "baz",
+ "foo" : "bar"
+ },
+ "final_decision" : "NO",
+ "weight" : -1.3833332,
+ "decisions" : [ {
+ "decider" : "same_shard",
+ "decision" : "NO",
+ "explanation" : "the shard cannot be allocated on the same node id [Qc6VL8c5RWaw1qXZ0Rg57g] on which it already exists"
+ } ]
+ },
+ "PzdyMZGXQdGhqTJHF_hGgA" : {
+ "node_name" : "node3",
+ "node_attributes" : { },
+ "final_decision" : "NO",
+ "weight" : 2.3166666,
+ "decisions" : [ {
+ "decider" : "filter",
+ "decision" : "NO",
+ "explanation" : "node does not match index include filters [foo:\"bar\"]"
+ } ]
+ }
+ }
+}
+--------------------------------------------------
+<1> Whether the shard is assigned or unassigned
+<2> Reason for the shard originally becoming unassigned
+<3> Per-node allocation decisions for the shard, keyed by node ID
+<4> User-added attributes the node has
+<5> Final decision for whether the shard is allowed to be allocated to this node
+<6> Weight for how much the allocator would like to allocate the shard to this node
+<7> List of decisions factoring into final decision
+
+For a shard that is already assigned, the output looks similar to:
+
+[source,js]
+--------------------------------------------------
+{
+ "shard" : {
+ "index" : "only-foo",
+ "index_uuid" : "KnW0-zELRs6PK84l0r38ZA",
+ "id" : 0,
+ "primary" : true
+ },
+ "assigned" : true,
+ "assigned_node_id" : "Qc6VL8c5RWaw1qXZ0Rg57g", <1>
+ "nodes" : {
+ "V-Spi0AyRZ6ZvKbaI3691w" : {
+ "node_name" : "Susan Storm",
+ "node_attributes" : {
+ "bar" : "baz"
+ },
+ "final_decision" : "NO",
+ "weight" : 1.4499999,
+ "decisions" : [ {
+ "decider" : "filter",
+ "decision" : "NO",
+ "explanation" : "node does not match index include filters [foo:\"bar\"]"
+ } ]
+ },
+ "Qc6VL8c5RWaw1qXZ0Rg57g" : {
+ "node_name" : "Slipstream",
+ "node_attributes" : {
+ "bar" : "baz",
+ "foo" : "bar"
+ },
+ "final_decision" : "CURRENTLY_ASSIGNED", <2>
+ "weight" : 0.0,
+ "decisions" : [ {
+ "decider" : "same_shard",
+ "decision" : "NO",
+ "explanation" : "the shard cannot be allocated on the same node id [Qc6VL8c5RWaw1qXZ0Rg57g] on which it already exists"
+ } ]
+ },
+ "PzdyMZGXQdGhqTJHF_hGgA" : {
+ "node_name" : "The Symbiote",
+ "node_attributes" : { },
+ "final_decision" : "NO",
+ "weight" : 3.6999998,
+ "decisions" : [ {
+ "decider" : "filter",
+ "decision" : "NO",
+ "explanation" : "node does not match index include filters [foo:\"bar\"]"
+ } ]
+ }
+ }
+}
+--------------------------------------------------
+<1> Node the shard is currently assigned to
+<2> The decision is "CURRENTLY_ASSIGNED" because the shard is currently assigned to this node
+
+You can also have Elasticsearch explain the allocation of the first unassigned
+shard it finds by sending an empty body, such as:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain'
+--------------------------------------------------
+
+If you would also like to see the `YES` decisions that factored into the final
+decision, set the `include_yes_decisions` parameter to `true`:
+
+[source,js]
+--------------------------------------------------
+$ curl -XGET 'http://localhost:9200/_cluster/allocation/explain?include_yes_decisions=true'
+--------------------------------------------------
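
For Java API consumers, the same explanation should be reachable through the cluster admin client. A hedged sketch, given an existing Client instance; the builder method names mirror the REST body fields (index/shard/primary) and are assumptions rather than documented API:

    // Ask for an allocation explanation of replica shard 0 of "myindex".
    ClusterAllocationExplainResponse response = client.admin().cluster()
            .prepareAllocationExplain()
            .setIndex("myindex")
            .setShard(0)
            .setPrimary(false)
            .get();
    // The response wraps the per-node decisions shown in the JSON above.
    System.out.println(response.getExplanation());
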
diff --git a/docs/reference/migration/migrate_5_0/java.asciidoc b/docs/reference/migration/migrate_5_0/java.asciidoc
index df526740f3..d7c580bd00 100644
--- a/docs/reference/migration/migrate_5_0/java.asciidoc
+++ b/docs/reference/migration/migrate_5_0/java.asciidoc
@@ -299,3 +299,8 @@ requests can now be validated at call time which results in much clearer errors.
==== ExplainRequestBuilder
The `setQuery(BytesReference)` method have been removed in favor of using `setQuery(QueryBuilder<?>)`
+
+=== ClusterStatsResponse
+
+Removed the `getMemoryAvailable` method from `OsStats`, which was previously accessible by calling
+`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`.
diff --git a/docs/reference/migration/migrate_5_0/rest.asciidoc b/docs/reference/migration/migrate_5_0/rest.asciidoc
index 590a097f02..0b8d76ea10 100644
--- a/docs/reference/migration/migrate_5_0/rest.asciidoc
+++ b/docs/reference/migration/migrate_5_0/rest.asciidoc
@@ -15,3 +15,8 @@ endpoint should be used in lieu of optimize.
The `GET` HTTP verb for `/_forcemerge` is no longer supported, please use the
`POST` HTTP verb.
+==== Removed `mem` section from `/_cluster/stats` response
+
+The `mem` section contained only one value, the total memory available
+across all nodes in the cluster. The section was removed as it didn't
+prove useful.
diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc
index f9b104e2fc..5e524a6a3c 100644
--- a/docs/reference/migration/migrate_5_0/settings.asciidoc
+++ b/docs/reference/migration/migrate_5_0/settings.asciidoc
@@ -82,7 +82,7 @@ The `index.analysis.analyzer.default_index` analyzer is not supported anymore.
If you wish to change the analyzer to use for indexing, change the
`index.analysis.analyzer.default` analyzer instead.
-==== Ping timeout settings
+==== Ping settings
Previously, there were three settings for the ping timeout:
`discovery.zen.initial_ping_timeout`, `discovery.zen.ping.timeout` and
@@ -90,6 +90,11 @@ Previously, there were three settings for the ping timeout:
setting key for the ping timeout is now `discovery.zen.ping_timeout`. The
default value for ping timeouts remains at three seconds.
+
+`discovery.zen.master_election.filter_client` and `discovery.zen.master_election.filter_data` have
+been removed in favor of the new `discovery.zen.master_election.ignore_non_master_pings`. This setting controls how
+ping responses are interpreted during master election and should be used with care and only in extreme cases. See the
+documentation for details.
+
==== Recovery settings
Recovery settings deprecated in 1.x have been removed:
@@ -205,3 +210,11 @@ setting settings via `--name.of.setting value.of.setting`. This feature
has been removed. Instead, use
`-Ees.name.of.setting=value.of.setting`. Note that in all cases the
name of the setting must be prefixed with `es.`.
+
+==== Discovery Settings
+
+The `discovery.zen.minimum_master_nodes` setting must be set for nodes that are bound
+to a non-loopback network interface. Such nodes are considered to be in "production"
+mode and thus require the setting.
+
+
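
For illustration, a node bound to a non-loopback interface would carry settings along these lines (a hedged sketch; the quorum value of 2 assumes three master-eligible nodes in the cluster):

    import org.elasticsearch.common.settings.Settings;

    // Nodes bound to non-loopback addresses are treated as "production" and
    // must declare minimum_master_nodes; 2 is the quorum for 3 master-eligible
    // nodes. The address below is a placeholder.
    Settings settings = Settings.builder()
            .put("network.host", "192.168.1.10")
            .put("discovery.zen.minimum_master_nodes", 2)
            .build();
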
diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc
index 4c750fd0c1..59076fd387 100644
--- a/docs/reference/modules/discovery/zen.asciidoc
+++ b/docs/reference/modules/discovery/zen.asciidoc
@@ -56,11 +56,9 @@ serves as a protection against (partial) network failures where node may unjustl
think that the master has failed. In this case the node will simply hear from
other nodes about the currently active master.
-If `discovery.zen.master_election.filter_client` is `true`, pings from client nodes (nodes where `node.client` is
-`true`, or both `node.data` and `node.master` are `false`) are ignored during master election; the default value is
-`true`. If `discovery.zen.master_election.filter_data` is `true`, pings from non-master-eligible data nodes (nodes
-where `node.data` is `true` and `node.master` is `false`) are ignored during master election; the default value is
-`false`. Pings from master-eligible nodes are always observed during master election.
+If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master
+eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is
+`false`.
Nodes can be excluded from becoming a master by setting `node.master` to
`false`. Note, once a node is a client node (`node.client` set to
diff --git a/docs/reference/modules/painless.asciidoc b/docs/reference/modules/painless.asciidoc
index 7c2bcdd975..c52773b360 100644
--- a/docs/reference/modules/painless.asciidoc
+++ b/docs/reference/modules/painless.asciidoc
@@ -1,19 +1,21 @@
[[modules-scripting-painless]]
== Painless Scripting Language
-_Painless_ is a simple, secure scripting language built in to Elasticsearch as a module.
+experimental[The Painless scripting language is new and is still marked as experimental. The syntax or API may be changed in the future in non-backwards compatible ways if required.]
+
+_Painless_ is a simple, secure scripting language built in to Elasticsearch as a module.
It is designed specifically for use with Elasticsearch and can safely be used dynamically.
-A Painless script is essentially a single function. Painless does not provide support
-for defining multiple functions within a script. The Painless syntax is similar to
-http://groovy-lang.org/index.html[Groovy].
+A Painless script is essentially a single function. Painless does not provide support
+for defining multiple functions within a script. The Painless syntax is similar to
+http://groovy-lang.org/index.html[Groovy].
-You can use Painless anywhere a script can be used in Elasticsearch--simply set the `lang` parameter
+You can use Painless anywhere a script can be used in Elasticsearch--simply set the `lang` parameter
to `painless`.
[[painless-features]]
[float]
-=== Painless Features
+== Painless Features
* Control flow: `for` loops, `while` loops, `do/while` loops, `if/else`
@@ -32,196 +34,229 @@ to `painless`.
[[painless-examples]]
[float]
-=== Painless Examples
+== Painless Examples
To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index:
[source,sh]
----------------------------------------------------------------
-curl -XDELETE http://localhost:9200/hockey-stats
-curl -XPUT http://localhost:9200/hockey-stats
-curl -XPUT http://localhost:9200/hockey-stats/player/1 -d '{"first":"johnny", "last":"gaudreau", "goals":[9, 27, 1], "assists":[17, 46, 0], "gp":[26, 82, 1]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/2 -d '{"first":"sean", "last":"monohan", "goals":[7, 54, 26], "assists":[11, 26, 13], "gp":[26, 82, 82]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/3 -d '{"first":"jiri", "last":"hudler", "goals":[5, 34, 36], "assists":[11, 62, 42], "gp":[24, 80, 79]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/4 -d '{"first":"micheal", "last":"frolik", "goals":[4, 6, 15], "assists":[8, 23, 15], "gp":[26, 82, 82]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/5 -d '{"first":"sam", "last":"bennett", "goals":[5, 0, 0], "assists":[8, 1, 0], "gp":[26, 1, 0]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/6 -d '{"first":"dennis", "last":"wideman", "goals":[0, 26, 15], "assists":[11, 30, 24], "gp":[26, 81, 82]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/7 -d '{"first":"david", "last":"jones", "goals":[7, 19, 5], "assists":[3, 17, 4], "gp":[26, 45, 34]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/8 -d '{"first":"tj", "last":"brodie", "goals":[2, 14, 7], "assists":[8, 42, 30], "gp":[26, 82, 82]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/9 -d '{"first":"mark", "last":"giordano", "goals":[6, 30, 15], "assists":[3, 30, 24], "gp":[26, 60, 63]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/10 -d '{"first":"mikael", "last":"backlund", "goals":[3, 15, 13], "assists":[6, 24, 18], "gp":[26, 82, 82]}'
-curl -XPUT http://localhost:9200/hockey-stats/player/11 -d '{"first":"joe", "last":"colborne", "goals":[3, 18, 13], "assists":[6, 20, 24], "gp":[26, 67, 82]}'
+DELETE /hockey-stats
+
+PUT /hockey-stats/player/_bulk
+{"index":{"_id":1}}
+{"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]}
+{"index":{"_id":2}}
+{"first":"sean","last":"monohan","goals":[7,54,26],"assists":[11,26,13],"gp":[26,82,82]}
+{"index":{"_id":3}}
+{"first":"jiri","last":"hudler","goals":[5,34,36],"assists":[11,62,42],"gp":[24,80,79]}
+{"index":{"_id":4}}
+{"first":"micheal","last":"frolik","goals":[4,6,15],"assists":[8,23,15],"gp":[26,82,82]}
+{"index":{"_id":5}}
+{"first":"sam","last":"bennett","goals":[5,0,0],"assists":[8,1,0],"gp":[26,1,0]}
+{"index":{"_id":6}}
+{"first":"dennis","last":"wideman","goals":[0,26,15],"assists":[11,30,24],"gp":[26,81,82]}
+{"index":{"_id":7}}
+{"first":"david","last":"jones","goals":[7,19,5],"assists":[3,17,4],"gp":[26,45,34]}
+{"index":{"_id":8}}
+{"first":"tj","last":"brodie","goals":[2,14,7],"assists":[8,42,30],"gp":[26,82,82]}
+{"index":{"_id":39}}
+{"first":"mark","last":"giordano","goals":[6,30,15],"assists":[3,30,24],"gp":[26,60,63]}
+{"index":{"_id":10}}
+{"first":"mikael","last":"backlund","goals":[3,15,13],"assists":[6,24,18],"gp":[26,82,82]}
+{"index":{"_id":11}}
+{"first":"joe","last":"colborne","goals":[3,18,13],"assists":[6,20,24],"gp":[26,67,82]}
----------------------------------------------------------------
+// AUTOSENSE
[float]
-==== Accessing Doc Values from Painless
+=== Accessing Doc Values from Painless
-All Painless scripts take in a `Map<String,def>` of values called `input`. Document values can be accessed through another `Map<String,def>` within the `input` variable.
+All Painless scripts take in a `Map<String,def>` of values called `input`. Document values can be accessed through another `Map<String,def>` within the `input` variable.
-For example, the following script calculates a player's total goals. This example uses a strongly typed `int` and a `for` loop.
+For example, the following script calculates a player's total goals. This example uses a strongly typed `int` and a `for` loop.
[source,sh]
----------------------------------------------------------------
-curl -XGET http://localhost:9200/hockey-stats/_search -d '{
- "query": {
- "function_score": {
- "script_score" : {
- "script" : {
- "inline":
- "int total = 0; for (int i = 0; i < input.doc.goals.size(); ++i) { total += input.doc.goals[i]; } return total;",
- "lang": "painless"
- }
+GET /hockey-stats/_search
+{
+ "query": {
+ "function_score": {
+ "script_score": {
+ "script": {
+ "lang": "painless",
+ "inline": "int total = 0; for (int i = 0; i < input.doc.goals.size(); ++i) { total += input.doc.goals[i]; } return total;"
}
+ }
}
- }
-}'
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
Alternatively, you could do the same thing using a script field instead of a function score:
[source,sh]
----------------------------------------------------------------
-curl -XGET http://localhost:9200/hockey-stats/_search -d '{
- "query": {
- "match_all": {}},
- "script_fields": {
- "total_goals": {
- "script": {
- "inline": "int total = 0; for (int i = 0; i < input.doc.goals.size(); ++i) { total += input.doc.goals[i]; } return total;",
- "lang": "painless"
- }
- }
+GET /hockey-stats/_search
+{
+ "query": {
+ "match_all": {}
+ },
+ "script_fields": {
+ "total_goals": {
+ "script": {
+ "lang": "painless",
+ "inline": "int total = 0; for (int i = 0; i < input.doc.goals.size(); ++i) { total += input.doc.goals[i]; } return total;"
+ }
}
-}'
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
-You must always specify the index of the field value you want, even if there's only a single item in the field.
+You must always specify the index of the field value you want, even if there's only a single item in the field.
All fields in Elasticsearch are multi-valued and Painless does not provide a `.value` shortcut. The following example uses a Painless script to sort the players by their combined first and last names. The names are accessed using
-`input.doc.first.0` and `input.doc.last.0`.
+`input.doc.first.0` and `input.doc.last.0`.
[source,sh]
----------------------------------------------------------------
-curl -XGET http://localhost:9200/hockey-stats/_search -d '{
- "query" : {
- "match_all": {}},
- "sort" : {
- "_script" : {
- "type" : "string",
- "script" : {"inline": "input.doc.first.0 + \" \" + input.doc.last.0",
- "lang": "painless"},
- "order" : "asc"
- }
+GET /hockey-stats/_search
+{
+ "query": {
+ "match_all": {}
+ },
+ "sort": {
+ "_script": {
+ "type": "string",
+ "order": "asc",
+ "script": {
+ "lang": "painless",
+ "inline": "input.doc.first.0 + \" \" + input.doc.last.0"
+ }
}
-}'
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
[float]
-==== Updating Fields with Painless
+=== Updating Fields with Painless
-You can also easily update fields. You access the original source for a field as `input.ctx._source.<field-name>`.
+You can also easily update fields. You access the original source for a field as `input.ctx._source.<field-name>`.
First, let's look at the source data for a player by submitting the following request:
[source,sh]
----------------------------------------------------------------
-curl -XGET http://localhost:9200/hockey-stats/_search -d '{
- "fields" : ["_id", "_source"], "query" : {
- "term" : { "_id" : 1 }
+GET /hockey-stats/_search
+{
+ "fields": [
+ "_id",
+ "_source"
+ ],
+ "query": {
+ "term": {
+ "_id": 1
}
-}'
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
-To change player 1's last name to _hockey_, simply set `input.ctx._source.last` to the new value:
+To change player 1's last name to `hockey`, simply set `input.ctx._source.last` to the new value:
[source,sh]
----------------------------------------------------------------
-curl -XPOST http://localhost:9200/hockey-stats/player/1/_update -d '{
- "script": {
- "inline": "input.ctx._source.last = input.last",
- "params": {"last": "hockey"},
- "lang": "painless"
- }
-}'
+POST /hockey-stats/player/1/_update
+{
+ "script": {
+ "lang": "painless",
+ "inline": "input.ctx._source.last = input.last",
+ "params": {
+ "last": "hockey"
+ }
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
-You can also add fields to a document. For example, this script adds a new field that contains
+You can also add fields to a document. For example, this script adds a new field that contains
the player's nickname, _hockey_.
[source,sh]
----------------------------------------------------------------
-curl -XPOST http://localhost:9200/hockey-stats/player/1/_update -d '{
- "script": {
- "inline": "input.ctx._source.last = input.last input.ctx._source.nick = input.nick",
- "params": {"last": "gaudreau", "nick": "hockey"},
- "lang": "painless"
- }
-}'
+POST /hockey-stats/player/1/_update
+{
+ "script": {
+ "lang": "painless",
+ "inline": "input.ctx._source.last = input.last input.ctx._source.nick = input.nick",
+ "params": {
+ "last": "gaudreau",
+ "nick": "hockey"
+ }
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
[float]
-==== Writing Type-Safe Scripts to Improve Performance
+=== Writing Type-Safe Scripts to Improve Performance
-If you explicitly specify types, the compiler doesn't have to perform type lookups at runtime, which can significantly
-improve performance. For example, the following script performs the same first name, last name sort we showed before,
+If you explicitly specify types, the compiler doesn't have to perform type lookups at runtime, which can significantly
+improve performance. For example, the following script performs the same first name, last name sort we showed before,
but it's fully type-safe.
[source,sh]
----------------------------------------------------------------
-curl -XGET http://localhost:9200/hockey-stats/_search -d '{
- "query": {
- "match_all": {}
- },
- "script_fields": {
- "full_name_dynamic": {
- "script": {
- "inline": "def first = input.doc.first.0; def last = input.doc.last.0; return first + \" \" + last;",
- "lang": "painless"
- }
- },
- "full_name_static": {
- "script": {
- "inline":
- "String first = (String)((List)((Map)input.get(\"doc\")).get(\"first\")).get(0); String last = (String)((List)((Map)input.get(\"doc\")).get(\"last\")).get(0); return first + \" \" + last;",
- "lang": "painless"
- }
- }
+GET /hockey-stats/_search
+{
+ "query": {
+ "match_all": {}
+ },
+ "script_fields": {
+ "full_name_dynamic": {
+ "script": {
+ "lang": "painless",
+ "inline": "def first = input.doc.first.0; def last = input.doc.last.0; return first + \" \" + last;"
+ }
+ },
+ "full_name_static": {
+ "script": {
+ "lang": "painless",
+ "inline": "String first = (String)((List)((Map)input.get(\"doc\")).get(\"first\")).get(0); String last = (String)((List)((Map)input.get(\"doc\")).get(\"last\")).get(0); return first + \" \" + last;"
+ }
}
-}'
+ }
+}
----------------------------------------------------------------
+// AUTOSENSE
[[painless-api]]
[float]
-=== Painless API
+== Painless API
The following types are available for use in the Painless language. Most types and methods map directly to their Java equivalents--for more information, see the corresponding https://docs.oracle.com/javase/8/docs/api/java/lang/package-summary.html[Javadoc].
[float]
-==== Dynamic Types
+=== Dynamic Types
-`def` (This type can be used to represent any other type.)
+* `def` (This type can be used to represent any other type.)
[float]
-==== Basic Types
-
-`void`
-
-`boolean`
-
-`short`
-
-`char`
+=== Basic Types
-`int`
-
-`long`
-
-`float`
-
-`double`
+* `void`
+* `boolean`
+* `short`
+* `char`
+* `int`
+* `long`
+* `float`
+* `double`
[float]
-==== Complex Types
+=== Complex Types
Non-static methods/members in superclasses are available to subclasses.
Generic types with unspecified generic parameters are parameters of type `def`.
@@ -242,7 +277,7 @@ ArrayList<Object> extends List<Object>
-----
-----
-ArrayList<String> extends List<String>
+ArrayList<String> extends List<String>
<init>()
-----
@@ -254,13 +289,13 @@ Boolean extends Object
-----
-----
-Character extends Object
+Character extends Object
<init>(char)
static Character valueOf(char)
char charValue()
static char MIN_VALUE
static char MAX_VALUE
------
+-----
-----
CharSequence extends Object
@@ -311,7 +346,7 @@ Double extends Number
-----
Exception extends Object
- String getMessage()
+ String getMessage()
-----
-----
@@ -324,7 +359,7 @@ Float extends Number
-----
HashMap extends Map
- <init>()
+ <init>()
-----
-----
@@ -334,14 +369,14 @@ HashMap<Object,Object> extends Map<Object,Object>
-----
HashMap<String,def> extends Map<String,def>
- <init>()
+ <init>()
-----
-----
HashMap<String,Object> extends Map<String,Object>
<init>()
-----
-
+
-----
IllegalArgument extends Exception
<init>()
@@ -349,7 +384,7 @@ IllegalArgument extends Exception
-----
IllegalState extends Exception
- <init>()
+ <init>()
-----
-----
@@ -413,7 +448,7 @@ Map extends Object
boolean containsKey(def)
boolean containsValue(def)
Set keySet()
- Collection values()
+ Collection values()
-----
-----
@@ -549,7 +584,7 @@ Math
static double min(double, double)
static float fmin(float, float)
static long lmin(long, long)
- static int imin(int, int)
+ static int imin(int, int)
static double pow(double, double)
static double random()
static double rint(double)
diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc
index 104adffe63..8015567145 100644
--- a/docs/reference/query-dsl/template-query.asciidoc
+++ b/docs/reference/query-dsl/template-query.asciidoc
@@ -106,7 +106,7 @@ GET /_search
}
}
------------------------------------------
-<1> Name of the query template in `config/scripts/`, i.e., `storedTemplate.mustache`.
+<1> Name of the query template in `config/scripts/`, e.g., `my_template.mustache`.
There is also a dedicated `template` endpoint, allows you to template an entire search request.
diff --git a/docs/reference/search/percolate.asciidoc b/docs/reference/search/percolate.asciidoc
index 44400f5b81..700c4ed392 100644
--- a/docs/reference/search/percolate.asciidoc
+++ b/docs/reference/search/percolate.asciidoc
@@ -1,8 +1,8 @@
[[search-percolate]]
== Percolator
-added[5.0.0,Percolator queries modifications aren't visible immediately and a refresh is required]
+deprecated[5.0.0,Percolate and multi percolate APIs are deprecated and have been replaced by the new <<query-dsl-percolator-query,`percolator` query>>]
-added[5.0.0,Percolate and multi percolate APIs have been deprecated and has been replaced by <<query-dsl-percolator-query, the new `percolator` query>>]
+added[5.0.0,Percolator query modifications only become visible after a refresh has occurred. Previously, they became visible immediately]
-added[5.0.0,For indices created on or after version 5.0.0 the percolator automatically indexes the query terms with the percolator queries this allows the percolator to percolate documents quicker. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
+added[5.0.0,For indices created on or after version 5.0.0-alpha1 the percolator automatically indexes the query terms with the percolator queries. This allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
index 17133a5de2..079dca2d4f 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java
@@ -69,8 +69,7 @@ public class BulkByScrollTask extends CancellableTask {
}
public static class Status implements Task.Status {
- public static final Status PROTOTYPE = new Status(0, 0, 0, 0, 0, 0, 0, 0, timeValueNanos(0), null);
-
+ public static final String NAME = "bulk-by-scroll";
private final long total;
private final long updated;
private final long created;
@@ -178,12 +177,7 @@ public class BulkByScrollTask extends CancellableTask {
@Override
public String getWriteableName() {
- return "bulk-by-scroll";
- }
-
- @Override
- public Status readFrom(StreamInput in) throws IOException {
- return new Status(in);
+ return NAME;
}
/**
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java
index 9ab025a252..9d84acf51c 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java
@@ -44,6 +44,6 @@ public class ReindexPlugin extends Plugin {
public void onModule(NetworkModule networkModule) {
networkModule.registerRestHandler(RestReindexAction.class);
networkModule.registerRestHandler(RestUpdateByQueryAction.class);
- networkModule.registerTaskStatus(BulkByScrollTask.Status.PROTOTYPE);
+ networkModule.registerTaskStatus(BulkByScrollTask.Status.NAME, BulkByScrollTask.Status::new);
}
}
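
The registerTaskStatus change is the same prototype-to-reader migration: a writeable name plus a constructor reference replace the prototype instance. An illustrative miniature of the pattern (names here are invented for the example, not Elasticsearch API):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import java.io.IOException;

    // A NAME constant plus a stream constructor replace the static PROTOTYPE
    // instance and its readFrom method.
    final class ExampleStatus {
        static final String NAME = "example-status";
        private final long total;

        ExampleStatus(long total) {
            this.total = total;
        }

        // Stream constructor: reads fields in exactly the order writeTo writes them.
        ExampleStatus(StreamInput in) throws IOException {
            this.total = in.readVLong();
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(total);
        }

        String getWriteableName() {
            return NAME;
        }
    }
    // Registration then passes a method reference where a prototype used to go:
    // networkModule.registerTaskStatus(ExampleStatus.NAME, ExampleStatus::new);
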
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java
index 7e74fe26ec..0cd4c9cbc1 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexResponse.java
@@ -46,7 +46,7 @@ public class ReindexResponse extends BulkIndexByScrollResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.field("took", getTook());
+ builder.field("took", getTook().millis());
builder.field("timed_out", isTimedOut());
getStatus().innerXContent(builder, params, true, false);
builder.startArray("failures");
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
index 06f5226a7f..b734a555fa 100644
--- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
+++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java
@@ -178,7 +178,12 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
* isn't counted as time that the last batch took.
*/
assertThat(action.getLastBatchStartTime(), greaterThanOrEqualTo(now));
- assertEquals(expectedHeaders, client.lastHeaders.get());
+
+ /*
+ * Also, while we're here, check that we preserved the headers from the last request. We use assertBusy because no
+ * requests might have come in yet.
+ */
+ assertBusy(() -> assertEquals(expectedHeaders, client.lastHeaders.get()));
}
}
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml
index 413c8d1c14..2f79b01d3f 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yaml
@@ -22,7 +22,7 @@
- match: {batches: 1}
- match: {failures: []}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
- is_false: task
---
@@ -55,7 +55,7 @@
- match: {batches: 1}
- match: {failures: []}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
- is_false: task
---
@@ -133,7 +133,7 @@
- match: {failures.0.cause.reason: "/\\[foo\\]\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"}
- match: {failures.0.cause.shard: /\d+/}
- match: {failures.0.cause.index: dest}
- - is_true: took
+ - gte: { took: 0 }
---
"Response format for version conflict with conflicts=proceed":
@@ -167,7 +167,7 @@
- match: {batches: 1}
- match: {failures: []}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
---
"Simplest example in docs":
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_throttle.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_throttle.yaml
index 2543670d5e..dc78dbbe70 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_throttle.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/80_throttle.yaml
@@ -49,5 +49,5 @@
- match: {failures: []}
- gt: {throttled_millis: 1000}
- lt: {throttled_millis: 4000}
- - is_true: took
+ - gte: { took: 1000 }
- is_false: task
diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml
index bf54ac5584..5b4df1c923 100644
--- a/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml
+++ b/modules/reindex/src/test/resources/rest-api-spec/test/update-by-query/10_basic.yaml
@@ -19,7 +19,7 @@
- match: {failures: []}
- match: {noops: 0}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
- is_false: created # Update by query can't create
- is_false: task
@@ -93,7 +93,7 @@
- match: {failures.0.cause.reason: "/\\[foo\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"}
- match: {failures.0.cause.shard: /\d+/}
- match: {failures.0.cause.index: test}
- - is_true: took
+ - gte: { took: 0 }
---
"Response for version conflict with conflicts=proceed":
@@ -128,7 +128,7 @@
- match: {noops: 0}
- match: {failures: []}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
---
"Limit by query":
@@ -158,7 +158,7 @@
- match: {version_conflicts: 0}
- match: {batches: 1}
- match: {failures: []}
- - is_true: took
+ - gte: { took: 0 }
---
"Limit by size":
@@ -186,7 +186,7 @@
- match: {batches: 1}
- match: {failures: []}
- match: {throttled_millis: 0}
- - is_true: took
+ - gte: { took: 0 }
---
"Can override scroll_size":
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml
index 0afa99cc6c..27114ac61f 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yaml
@@ -274,3 +274,55 @@
id: 1
- length: { _source: 2 }
- match: { _source.values_flat: ["foo_bar", "foo_baz"] }
+
+---
+"Test verbose simulate with error context":
+ - do:
+ cluster.health:
+ wait_for_status: green
+ - do:
+ ingest.simulate:
+ verbose: true
+ body: >
+ {
+ "pipeline" : {
+ "description": "_description",
+ "processors": [
+ {
+ "rename" : {
+ "tag" : "rename-status",
+ "field" : "status",
+ "to" : "bar",
+ "on_failure" : [
+ {
+ "set" : {
+ "tag" : "set_on_rename_failure",
+ "field" : "error",
+ "value" : "processor {{ _ingest.on_failure_processor_tag }} [{{ _ingest.on_failure_processor_type }}]: {{ _ingest.on_failure_message }}"
+ }
+ }
+ ]
+ }
+ }
+ ]
+ },
+ "docs": [
+ {
+ "_index": "index",
+ "_type": "type",
+ "_id": "id",
+ "_source": {
+ "foo": "bar"
+ }
+ }
+ ]
+ }
+ - length: { docs: 1 }
+ - length: { docs.0.processor_results: 2 }
+ - match: { docs.0.processor_results.0.tag: "rename-status" }
+ - match: { docs.0.processor_results.0.error.type: "illegal_argument_exception" }
+ - match: { docs.0.processor_results.0.error.reason: "field [status] doesn't exist" }
+ - match: { docs.0.processor_results.1.tag: "set_on_rename_failure" }
+ - length: { docs.0.processor_results.1.doc._source: 2 }
+ - match: { docs.0.processor_results.1.doc._source.foo: "bar" }
+ - match: { docs.0.processor_results.1.doc._source.error: "processor rename-status [rename]: field [status] doesn't exist" }
diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml
index 50442c2d51..2c649220b7 100644
--- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml
+++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/reindex/40_search_failures.yaml
@@ -31,4 +31,4 @@
- match: {failures.0.reason.reason: "failed to run inline script [1/0] using lang [groovy]"}
- match: {failures.0.reason.caused_by.type: arithmetic_exception}
- match: {failures.0.reason.caused_by.reason: Division by zero}
- - is_true: took
+ - gte: { took: 0 }
diff --git a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml
index 8f89409c58..9b8ba43f4a 100644
--- a/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml
+++ b/qa/smoke-test-reindex-with-groovy/src/test/resources/rest-api-spec/test/update-by-query/40_search_failure.yaml
@@ -27,4 +27,4 @@
- match: {failures.0.reason.reason: "failed to run inline script [1/0] using lang [groovy]"}
- match: {failures.0.reason.caused_by.type: arithmetic_exception}
- match: {failures.0.reason.caused_by.reason: Division by zero}
- - is_true: took
+ - gte: { took: 0 }
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
index deeb37d54e..48bf2aca4e 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
+++ b/qa/vagrant/src/test/resources/packaging/scripts/80_upgrade.bats
@@ -42,7 +42,6 @@ setup() {
@test "[UPGRADE] install old version" {
clean_before_test
install_package -v $(cat upgrade_from_version)
- perl -p -i -e 's/es.logger.level: INFO/es.logger.level: DEBUG/' /etc/elasticsearch/logging.yml
}
@test "[UPGRADE] start old version" {
@@ -81,7 +80,6 @@ setup() {
@test "[UPGRADE] install version under test" {
install_package -u
- perl -p -i -e 's/es.logger.level: INFO/es.logger.level: DEBUG/' /etc/elasticsearch/logging.yml
}
@test "[UPGRADE] start version under test" {
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats b/qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats
new file mode 100644
index 0000000000..3c2f7be733
--- /dev/null
+++ b/qa/vagrant/src/test/resources/packaging/scripts/90_reinstall.bats
@@ -0,0 +1,72 @@
+#!/usr/bin/env bats
+
+# Tests reinstalling elasticsearch with the deb or rpm packages after the
+# previous installation has been purged, verifying that the package scripts
+# restore the expected ownership of the data, log, and config directories.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+load os_package
+
+# Cleans everything for the 1st execution
+setup() {
+ skip_not_dpkg_or_rpm
+}
+
+@test "[REINSTALL] install" {
+ clean_before_test
+ install_package
+}
+
+@test "[REINSTALL] purge elasticsearch" {
+ purge_elasticsearch
+}
+
+@test "[REINSTALL] chown directories" {
+ # to simulate the loss of ownership
+ if [ -d /var/lib/elasticsearch ]; then
+ sudo chown -R root:root /var/lib/elasticsearch
+ fi
+ if [ -d "/var/log/elasticsearch" ]; then
+ sudo chown -R root:root /var/log/elasticsearch
+ fi
+ if [ -d /etc/elasticsearch ]; then
+ sudo chown -R root:root /etc/elasticsearch
+ fi
+}
+
+@test "[REINSTALL] reinstall elasticsearch" {
+ install_package
+}
+
+@test "[REINSTALL] check ownership" {
+ assert_recursive_ownership /var/lib/elasticsearch elasticsearch elasticsearch
+ assert_recursive_ownership /var/log/elasticsearch elasticsearch elasticsearch
+ assert_recursive_ownership /etc/elasticsearch root elasticsearch
+}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
index bcc0fd66f2..72c59c3932 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/os_package.bash
@@ -48,6 +48,7 @@ install_package() {
case $opt in
u)
rpmCommand='-U'
+ dpkgCommand='--force-confnew'
;;
v)
version=$OPTARG
@@ -60,7 +61,7 @@ install_package() {
if is_rpm; then
rpm $rpmCommand elasticsearch-$version.rpm
elif is_dpkg; then
- dpkg -i elasticsearch-$version.deb
+ dpkg $dpkgCommand -i elasticsearch-$version.deb
else
skip "Only rpm or deb supported"
fi
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
index c8c1d6870a..133b6e16f1 100644
--- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
+++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash
@@ -209,6 +209,17 @@ assert_output() {
echo "$output" | grep -E "$1"
}
+assert_recursive_ownership() {
+ local directory=$1
+ local user=$2
+ local group=$3
+
+ realuser=$(find $directory -printf "%u\n" | sort | uniq)
+ [ "$realuser" = "$user" ]
+ realgroup=$(find $directory -printf "%g\n" | sort | uniq)
+ [ "$realgroup" = "$group" ]
+}
+
# Deletes everything before running a test file
clean_before_test() {
@@ -235,6 +246,22 @@ clean_before_test() {
# Kills all running Elasticsearch processes
ps aux | grep -i "org.elasticsearch.bootstrap.Elasticsearch" | awk {'print $2'} | xargs kill -9 > /dev/null 2>&1 || true
+ purge_elasticsearch
+
+ # Removes user & group
+ userdel elasticsearch > /dev/null 2>&1 || true
+ groupdel elasticsearch > /dev/null 2>&1 || true
+
+
+ # Removes all files
+ for d in "${ELASTICSEARCH_TEST_FILES[@]}"; do
+ if [ -e "$d" ]; then
+ rm -rf "$d"
+ fi
+ done
+}
+
+purge_elasticsearch() {
# Removes RPM package
if is_rpm; then
rpm --quiet -e elasticsearch > /dev/null 2>&1 || true
@@ -252,18 +279,6 @@ clean_before_test() {
if [ -x "`which apt-get 2>/dev/null`" ]; then
apt-get --quiet --yes purge elasticsearch > /dev/null 2>&1 || true
fi
-
- # Removes user & group
- userdel elasticsearch > /dev/null 2>&1 || true
- groupdel elasticsearch > /dev/null 2>&1 || true
-
-
- # Removes all files
- for d in "${ELASTICSEARCH_TEST_FILES[@]}"; do
- if [ -e "$d" ]; then
- rm -rf "$d"
- fi
- done
}
# Start elasticsearch and wait for it to come up with a status.
@@ -385,10 +400,6 @@ wait_for_elasticsearch_status() {
local desiredStatus=${1:-green}
local index=$2
- if [ -f /var/log/elasitcsearch/elasticsearch.log ]; then
- cat /var/log/elasticsearch/elasticsearch.log >&3
- fi
-
echo "Making sure elasticsearch is up..."
wget -O - --retry-connrefused --waitretry=1 --timeout=60 --tries 60 http://localhost:9200/_cluster/health || {
echo "Looks like elasticsearch never started. Here is its log:"
@@ -398,7 +409,6 @@ wait_for_elasticsearch_status() {
echo "The elasticsearch log doesn't exist. Maybe /var/log/messages has something:"
tail -n20 /var/log/messages
fi
- cat /var/log/elasticsearch/elasticsearch.log >&3
false
}
@@ -413,7 +423,6 @@ wait_for_elasticsearch_status() {
echo "Connected"
else
echo "Unable to connect to Elastisearch"
- cat /var/log/elasticsearch/elasticsearch.log >&3
false
fi
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
index b54bee38c2..68468dd542 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json
@@ -8,6 +8,7 @@
"parts": {
"repository": {
"type" : "list",
+ "required": true,
"description": "Name of repository from which to fetch the snapshot information"
}
},
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json
new file mode 100644
index 0000000000..505c163497
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json
@@ -0,0 +1,20 @@
+{
+ "cluster.allocation_explain": {
+ "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html",
+ "methods": ["GET", "POST"],
+ "url": {
+ "path": "/_cluster/allocation/explain",
+ "paths": ["/_cluster/allocation/explain"],
+ "parts": {},
+ "params": {
+ "include_yes_decisions": {
+ "type": "boolean",
+ "description": "Return 'YES' decisions in explanation (default: false)"
+ }
+ }
+ },
+ "body": {
+ "description": "The index, shard, and primary flag to explain. Empty means 'explain the first unassigned shard'"
+ }
+ }
+}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
new file mode 100644
index 0000000000..0163ffae3e
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yaml
@@ -0,0 +1,76 @@
+---
+"cluster shard allocation explanation test":
+ - do:
+ # there aren't any unassigned shards to explain
+ catch: /unable to find any shards to explain/
+ cluster.allocation_explain: {}
+
+ - do:
+ indices.create:
+ index: test
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ cluster.state:
+ metric: [ master_node ]
+
+ - set: {master_node: node_id}
+
+ # This relies on there only being a single node in the test cluster, which
+ # is currently true, but if this changes in the future this test will need
+ # to be changed
+ - do:
+ cluster.allocation_explain:
+ body: { "index": "test", "shard": 0, "primary": true }
+
+ - match: { assigned: true }
+ # - match: { assigned_node_id: $node_id }
+ - is_true: assigned_node_id
+ - match: { shard.index: "test" }
+ - match: { shard.id: 0 }
+ - match: { shard.primary: true }
+ # unfortunately can't test these because they break with multi-node backwards
+ # compat REST tests
+ # - is_true: nodes.$node_id.node_name
+ # - match: { nodes.$node_id.node_attributes.testattr: "test" }
+ # - match: { nodes.$node_id.node_attributes.portsfile: "true" }
+ # - match: { nodes.$node_id.final_decision: "CURRENTLY_ASSIGNED" }
+ # - match: { nodes.$node_id.weight: 0.0 }
+ # - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
+ # - match: { nodes.$node_id.decisions.0.decision: "NO" }
+
+---
+"cluster shard allocation explanation test with empty request":
+ - do:
+ indices.create:
+ index: test
+ body: { "index.number_of_shards": 1, "index.number_of_replicas": 9 }
+
+ - do:
+ cluster.health:
+ wait_for_status: yellow
+
+ - do:
+ cluster.state:
+ metric: [ master_node ]
+
+ - set: {master_node: node_id}
+
+ - do:
+ cluster.allocation_explain: {}
+
+ - match: { assigned: false }
+ - match: { unassigned_info.reason: "INDEX_CREATED" }
+ - is_true: unassigned_info.at
+ - match: { shard.index: "test" }
+ - match: { shard.id: 0 }
+ - match: { shard.primary: false }
+ # - is_true: nodes.$node_id.node_name
+ # - match: { nodes.$node_id.node_attributes.testattr: "test" }
+ # - match: { nodes.$node_id.node_attributes.portsfile: "true" }
+ # - match: { nodes.$node_id.final_decision: "NO" }
+ # - match: { nodes.$node_id.decisions.0.decider: "same_shard" }
+ # - match: { nodes.$node_id.decisions.0.decision: "NO" }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
index ade3b68e25..f81cb23469 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml
@@ -27,7 +27,7 @@
filter_path: "*"
body: "{ query: { match_all: {} } }"
- - is_true: took
+ - gte: { took: 0 }
- is_true: _shards.total
- is_true: hits.total
- is_true: hits.hits.0._index
@@ -84,4 +84,3 @@
- is_true: hits.hits.1._index
- is_false: hits.hits.1._type
- is_true: hits.hits.1._id
-
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
index b7984416fa..d15773db54 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
@@ -30,7 +30,7 @@ import org.elasticsearch.test.ESTestCase;
public class TestShardRouting {
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
- return newShardRouting(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId, currentNodeId,primary, state);
+ return newShardRouting(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE), shardId, currentNodeId, primary, state);
}
public static ShardRouting newShardRouting(Index index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 207593725d..1c95372f94 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -30,6 +30,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
@@ -1837,7 +1838,8 @@ public final class InternalTestCluster extends TestCluster {
}
NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
- NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false, false);
+ CommonStatsFlags flags = new CommonStatsFlags(Flag.FieldData, Flag.QueryCache, Flag.Segments);
+ NodeStats stats = nodeService.stats(flags, false, false, false, false, false, false, false, false, false, false, false);
assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0L));
assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0L));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0L));
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
index ba1b99288e..fa9c5cf099 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
@@ -73,7 +73,7 @@ public class RestTestSuiteParser implements RestTestFragmentParser<RestTestSuite
XContentParser parser = parseContext.parser();
parser.nextToken();
- assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT : "expected token to be START_OBJECT but was " + parser.currentToken();
RestTestSuite restTestSuite = new RestTestSuite(parseContext.getApi(), parseContext.getSuiteName());
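
The only change in this last hunk is attaching a detail message to the assert. In Java, the expression after the colon is evaluated only if the condition fails, and assertions run only when enabled with -ea, so the message costs nothing on the happy path. A standalone illustration of the idiom:

    public class AssertDemo {
        public static void main(String[] args) {
            int token = 3;
            // Run with: java -ea AssertDemo  (assertions are disabled by default)
            assert token == 1 : "expected token to be 1 but was " + token;
        }
    }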